diff --git a/v1.13/qiniucloud/PRODUCT.yaml b/v1.13/qiniucloud/PRODUCT.yaml new file mode 100644 index 0000000000..e723856216 --- /dev/null +++ b/v1.13/qiniucloud/PRODUCT.yaml @@ -0,0 +1,8 @@ +vendor: Qiniu Cloud +name: Qiniu Enterprise Platform +version: v4.2.0 +website_url: https://www.qiniu.com/products/kirk +documentation_url: http://kirk-docs.qiniu.com +product_logo_url: https://www.cncf.io/wp-content/uploads/2018/09/qiniu.svg +type: distribution +description: Qiniu Enterprise Platform is a multi-cloud support platform based on Kubernetes and Docker for application lifecycle management with high availability and high stability. This platform can accelerate time-to-market, improve cluster utilization, and simplify the operation and maintenance. \ No newline at end of file diff --git a/v1.13/qiniucloud/README.md b/v1.13/qiniucloud/README.md new file mode 100644 index 0000000000..4daa39ed21 --- /dev/null +++ b/v1.13/qiniucloud/README.md @@ -0,0 +1,27 @@ +# How To Reproduce: + +## Create Cluster + +Create a Cluster for Qiniu Cloud Container Engine. + +After the creation is completed, launch the Kubernetes e2e conformance test. + +## Run Conformance Test + +Run the commands as below: + +` +go get -u -v github.com/heptio/sonobuoy +`
+` +sonobuoy run +`
+` +sonobuoy status +`
+` +sonobuoy logs +`
+ +Check sonobuoy's logs for the line `no-exit was specified, sonobuoy is now blocking`, which indicates that the e2e test is finished.
+Retrieve `e2e.log` and `junit_01.xml` file out of the tar file. diff --git a/v1.13/qiniucloud/e2e.log b/v1.13/qiniucloud/e2e.log new file mode 100644 index 0000000000..7de3719420 --- /dev/null +++ b/v1.13/qiniucloud/e2e.log @@ -0,0 +1,11256 @@ +I0618 07:13:17.418338 16 test_context.go:359] Using a temporary kubeconfig file from in-cluster config : /tmp/kubeconfig-656024001 +I0618 07:13:17.418547 16 e2e.go:224] Starting e2e run "8b392b75-9198-11e9-bbf5-0e74dabf3615" on Ginkgo node 1 +Running Suite: Kubernetes e2e suite +=================================== +Random Seed: 1560841996 - Will randomize all specs +Will run 201 of 2161 specs + +Jun 18 07:13:17.582: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:13:17.584: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable +Jun 18 07:13:17.604: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready +Jun 18 07:13:18.642: INFO: 51 / 51 pods in namespace 'kube-system' are running and ready (1 seconds elapsed) +Jun 18 07:13:18.642: INFO: expected 18 pod replicas in namespace 'kube-system', 18 are Running and Ready. 
+Jun 18 07:13:18.642: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start +Jun 18 07:13:18.660: INFO: 6 / 6 pods ready in namespace 'kube-system' in daemonset 'calico-node' (0 seconds elapsed) +Jun 18 07:13:18.660: INFO: 1 / 1 pods ready in namespace 'kube-system' in daemonset 'contour' (0 seconds elapsed) +Jun 18 07:13:18.660: INFO: 6 / 6 pods ready in namespace 'kube-system' in daemonset 'kube-proxy' (0 seconds elapsed) +Jun 18 07:13:18.660: INFO: 6 / 6 pods ready in namespace 'kube-system' in daemonset 'logkit-poc' (0 seconds elapsed) +Jun 18 07:13:18.660: INFO: 6 / 6 pods ready in namespace 'kube-system' in daemonset 'prometheus-operator-prometheus-node-exporter' (0 seconds elapsed) +Jun 18 07:13:18.660: INFO: e2e test version: v1.13.5 +Jun 18 07:13:18.662: INFO: kube-apiserver version: v1.13.5 +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should delete pods created by rc when not orphaning [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:13:18.662: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename gc +Jun 18 07:13:20.534: INFO: Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled. +Jun 18 07:13:20.548: INFO: Found ClusterRoles; assuming RBAC is enabled. 
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-b67fw +STEP: Waiting for a default service account to be provisioned in namespace +[It] should delete pods created by rc when not orphaning [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the rc +STEP: delete the rc +STEP: wait for all pods to be garbage collected +STEP: Gathering metrics +W0618 07:13:32.530765 16 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled. +Jun 18 07:13:32.530: INFO: For apiserver_request_count: +For apiserver_request_latencies_summary: +For etcd_helper_cache_entry_count: +For etcd_helper_cache_hit_count: +For etcd_helper_cache_miss_count: +For etcd_request_cache_add_latencies_summary: +For etcd_request_cache_get_latencies_summary: +For etcd_request_latencies_summary: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:13:32.530: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-gc-b67fw" for this suite. +Jun 18 07:13:40.576: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:13:40.689: INFO: namespace: e2e-tests-gc-b67fw, resource: bindings, ignored listing per whitelist +Jun 18 07:13:41.513: INFO: namespace e2e-tests-gc-b67fw deletion completed in 8.974732305s + +• [SLOW TEST:22.851 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should delete pods created by rc when not orphaning [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl cluster-info + should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:13:41.513: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-nwmnb +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] 
[sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: validating cluster-info +Jun 18 07:13:41.700: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 cluster-info' +Jun 18 07:13:42.721: INFO: stderr: "" +Jun 18 07:13:42.721: INFO: stdout: "\x1b[0;32mKubernetes master\x1b[0m is running at \x1b[0;33mhttps://169.169.0.1:443\x1b[0m\n\x1b[0;32mKubeDNS\x1b[0m is running at \x1b[0;33mhttps://169.169.0.1:443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:13:42.721: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-nwmnb" for this suite. 
+Jun 18 07:13:50.735: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:13:50.845: INFO: namespace: e2e-tests-kubectl-nwmnb, resource: bindings, ignored listing per whitelist +Jun 18 07:13:51.523: INFO: namespace e2e-tests-kubectl-nwmnb deletion completed in 8.798670095s + +• [SLOW TEST:10.010 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl cluster-info + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with projected pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:13:51.523: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename subpath +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-subpath-bwjv9 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with projected pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod pod-subpath-test-projected-nc5r +STEP: Creating a pod to test atomic-volume-subpath +Jun 18 07:13:51.746: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-nc5r" in namespace "e2e-tests-subpath-bwjv9" to be "success or failure" +Jun 18 07:13:51.749: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Pending", Reason="", readiness=false. Elapsed: 3.456035ms +Jun 18 07:13:54.519: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Pending", Reason="", readiness=false. Elapsed: 2.77299769s +Jun 18 07:13:56.521: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Running", Reason="", readiness=false. Elapsed: 4.775573349s +Jun 18 07:13:58.531: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Running", Reason="", readiness=false. Elapsed: 6.785172866s +Jun 18 07:14:00.534: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Running", Reason="", readiness=false. Elapsed: 8.788357868s +Jun 18 07:14:02.751: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Running", Reason="", readiness=false. Elapsed: 11.005024266s +Jun 18 07:14:04.754: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Running", Reason="", readiness=false. Elapsed: 13.007758892s +Jun 18 07:14:06.756: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Running", Reason="", readiness=false. Elapsed: 15.01041066s +Jun 18 07:14:09.532: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Running", Reason="", readiness=false. Elapsed: 17.785686828s +Jun 18 07:14:11.535: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Running", Reason="", readiness=false. 
Elapsed: 19.788578788s +Jun 18 07:14:13.541: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Running", Reason="", readiness=false. Elapsed: 21.794815424s +Jun 18 07:14:15.544: INFO: Pod "pod-subpath-test-projected-nc5r": Phase="Succeeded", Reason="", readiness=false. Elapsed: 23.797894655s +STEP: Saw pod success +Jun 18 07:14:15.544: INFO: Pod "pod-subpath-test-projected-nc5r" satisfied condition "success or failure" +Jun 18 07:14:15.547: INFO: Trying to get logs from node node5 pod pod-subpath-test-projected-nc5r container test-container-subpath-projected-nc5r: +STEP: delete the pod +Jun 18 07:14:15.564: INFO: Waiting for pod pod-subpath-test-projected-nc5r to disappear +Jun 18 07:14:15.566: INFO: Pod pod-subpath-test-projected-nc5r no longer exists +STEP: Deleting pod pod-subpath-test-projected-nc5r +Jun 18 07:14:15.566: INFO: Deleting pod "pod-subpath-test-projected-nc5r" in namespace "e2e-tests-subpath-bwjv9" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:14:15.568: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-subpath-bwjv9" for this suite. 
+Jun 18 07:14:23.582: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:14:23.609: INFO: namespace: e2e-tests-subpath-bwjv9, resource: bindings, ignored listing per whitelist +Jun 18 07:14:24.556: INFO: namespace e2e-tests-subpath-bwjv9 deletion completed in 8.984293182s + +• [SLOW TEST:33.033 seconds] +[sig-storage] Subpath +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with projected pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:14:24.556: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-xj65n +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0644,tmpfs) [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0644 on tmpfs +Jun 18 07:14:25.526: INFO: Waiting up to 5m0s for pod "pod-b4287c6f-9198-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-xj65n" to be "success or failure" +Jun 18 07:14:25.534: INFO: Pod "pod-b4287c6f-9198-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 8.187802ms +Jun 18 07:14:27.537: INFO: Pod "pod-b4287c6f-9198-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.010850601s +STEP: Saw pod success +Jun 18 07:14:27.537: INFO: Pod "pod-b4287c6f-9198-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:14:27.539: INFO: Trying to get logs from node node5 pod pod-b4287c6f-9198-11e9-bbf5-0e74dabf3615 container test-container: +STEP: delete the pod +Jun 18 07:14:27.553: INFO: Waiting for pod pod-b4287c6f-9198-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:14:27.555: INFO: Pod pod-b4287c6f-9198-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:14:27.555: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-xj65n" for this suite. 
+Jun 18 07:14:35.572: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:14:35.666: INFO: namespace: e2e-tests-emptydir-xj65n, resource: bindings, ignored listing per whitelist +Jun 18 07:14:36.675: INFO: namespace e2e-tests-emptydir-xj65n deletion completed in 9.116724664s + +• [SLOW TEST:12.119 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:14:36.675: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-nkkjh +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating projection with secret that has name 
projected-secret-test-bb6a3ee6-9198-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume secrets +Jun 18 07:14:37.695: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-bb6ab945-9198-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-nkkjh" to be "success or failure" +Jun 18 07:14:37.698: INFO: Pod "pod-projected-secrets-bb6ab945-9198-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.680265ms +Jun 18 07:14:39.700: INFO: Pod "pod-projected-secrets-bb6ab945-9198-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.004828344s +STEP: Saw pod success +Jun 18 07:14:39.700: INFO: Pod "pod-projected-secrets-bb6ab945-9198-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:14:39.702: INFO: Trying to get logs from node node5 pod pod-projected-secrets-bb6ab945-9198-11e9-bbf5-0e74dabf3615 container projected-secret-volume-test: +STEP: delete the pod +Jun 18 07:14:39.723: INFO: Waiting for pod pod-projected-secrets-bb6ab945-9198-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:14:39.726: INFO: Pod pod-projected-secrets-bb6ab945-9198-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:14:39.726: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-nkkjh" for this suite. 
+Jun 18 07:14:47.739: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:14:48.575: INFO: namespace: e2e-tests-projected-nkkjh, resource: bindings, ignored listing per whitelist +Jun 18 07:14:49.572: INFO: namespace e2e-tests-projected-nkkjh deletion completed in 9.842288583s + +• [SLOW TEST:12.897 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34 + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSS +------------------------------ +[sig-node] ConfigMap + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-node] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:14:49.572: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-8cjbf +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap e2e-tests-configmap-8cjbf/configmap-test-c3106554-9198-11e9-bbf5-0e74dabf3615 
+STEP: Creating a pod to test consume configMaps +Jun 18 07:14:50.542: INFO: Waiting up to 5m0s for pod "pod-configmaps-c31145be-9198-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-configmap-8cjbf" to be "success or failure" +Jun 18 07:14:50.546: INFO: Pod "pod-configmaps-c31145be-9198-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 4.377793ms +Jun 18 07:14:52.549: INFO: Pod "pod-configmaps-c31145be-9198-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007411873s +STEP: Saw pod success +Jun 18 07:14:52.549: INFO: Pod "pod-configmaps-c31145be-9198-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:14:52.552: INFO: Trying to get logs from node node5 pod pod-configmaps-c31145be-9198-11e9-bbf5-0e74dabf3615 container env-test: +STEP: delete the pod +Jun 18 07:14:52.586: INFO: Waiting for pod pod-configmaps-c31145be-9198-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:14:52.591: INFO: Pod pod-configmaps-c31145be-9198-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-node] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:14:52.591: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-8cjbf" for this suite. 
+Jun 18 07:15:00.606: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:15:00.697: INFO: namespace: e2e-tests-configmap-8cjbf, resource: bindings, ignored listing per whitelist +Jun 18 07:15:00.919: INFO: namespace e2e-tests-configmap-8cjbf deletion completed in 8.323573912s + +• [SLOW TEST:11.347 seconds] +[sig-node] ConfigMap +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:31 + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSS +------------------------------ +[sig-network] Networking Granular Checks: Pods + should function for node-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Networking + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:15:00.920: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename pod-network-test +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pod-network-test-zwcbg +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for node-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Performing setup for networking test in namespace 
e2e-tests-pod-network-test-zwcbg +STEP: creating a selector +STEP: Creating the service pods in kubernetes +Jun 18 07:15:01.515: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +STEP: Creating test pods +Jun 18 07:15:31.643: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://171.171.166.184:8080/hostName | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-zwcbg PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:15:31.643: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:15:31.738: INFO: Found all expected endpoints: [netserver-0] +Jun 18 07:15:31.741: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://171.171.135.55:8080/hostName | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-zwcbg PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:15:31.741: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:15:31.813: INFO: Found all expected endpoints: [netserver-1] +Jun 18 07:15:31.818: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://171.171.104.11:8080/hostName | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-zwcbg PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:15:31.819: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:15:32.594: INFO: Found all expected endpoints: [netserver-2] +Jun 18 07:15:32.596: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://171.171.3.90:8080/hostName | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-zwcbg PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 
18 07:15:32.596: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:15:32.673: INFO: Found all expected endpoints: [netserver-3] +Jun 18 07:15:32.676: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://171.171.33.167:8080/hostName | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-zwcbg PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:15:32.676: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:15:32.750: INFO: Found all expected endpoints: [netserver-4] +[AfterEach] [sig-network] Networking + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:15:32.750: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pod-network-test-zwcbg" for this suite. +Jun 18 07:16:03.549: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:16:03.678: INFO: namespace: e2e-tests-pod-network-test-zwcbg, resource: bindings, ignored listing per whitelist +Jun 18 07:16:05.567: INFO: namespace e2e-tests-pod-network-test-zwcbg deletion completed in 32.813284928s + +• [SLOW TEST:64.647 seconds] +[sig-network] Networking +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25 + Granular Checks: Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28 + should function for node-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS 
+------------------------------ +[sig-storage] Projected secret + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:16:05.567: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-bfbwh +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name projected-secret-test-f05c8851-9198-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume secrets +Jun 18 07:16:06.579: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-f05ffeb4-9198-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-bfbwh" to be "success or failure" +Jun 18 07:16:06.588: INFO: Pod "pod-projected-secrets-f05ffeb4-9198-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 9.563814ms +Jun 18 07:16:08.649: INFO: Pod "pod-projected-secrets-f05ffeb4-9198-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.070459754s +Jun 18 07:16:10.673: INFO: Pod "pod-projected-secrets-f05ffeb4-9198-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4.094405812s +Jun 18 07:16:12.676: INFO: Pod "pod-projected-secrets-f05ffeb4-9198-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.096970808s +STEP: Saw pod success +Jun 18 07:16:12.676: INFO: Pod "pod-projected-secrets-f05ffeb4-9198-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:16:12.677: INFO: Trying to get logs from node node5 pod pod-projected-secrets-f05ffeb4-9198-11e9-bbf5-0e74dabf3615 container secret-volume-test: +STEP: delete the pod +Jun 18 07:16:12.692: INFO: Waiting for pod pod-projected-secrets-f05ffeb4-9198-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:16:12.709: INFO: Pod pod-projected-secrets-f05ffeb4-9198-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:16:12.709: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-bfbwh" for this suite. 
+Jun 18 07:16:25.541: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:16:26.622: INFO: namespace: e2e-tests-projected-bfbwh, resource: bindings, ignored listing per whitelist +Jun 18 07:16:26.644: INFO: namespace e2e-tests-projected-bfbwh deletion completed in 13.931196307s + +• [SLOW TEST:21.077 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34 + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Deployment + deployment should support rollover [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:16:26.645: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename deployment +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-deployment-qks7l +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65 +[It] deployment should support rollover [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +Jun 18 07:16:28.554: INFO: Pod name rollover-pod: Found 0 pods out of 1 +Jun 18 07:16:33.599: INFO: Pod name rollover-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +Jun 18 07:16:33.599: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready +Jun 18 07:16:35.601: INFO: Creating deployment "test-rollover-deployment" +Jun 18 07:16:35.614: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations +Jun 18 07:16:37.622: INFO: Check revision of new replica set for deployment "test-rollover-deployment" +Jun 18 07:16:37.634: INFO: Ensure that both replica sets have 1 created replica +Jun 18 07:16:37.638: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update +Jun 18 07:16:37.642: INFO: Updating deployment test-rollover-deployment +Jun 18 07:16:37.642: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller +Jun 18 07:16:39.669: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2 +Jun 18 07:16:40.521: INFO: Make sure deployment "test-rollover-deployment" is complete +Jun 18 07:16:40.533: INFO: all replica sets need to contain the pod-template-hash label +Jun 18 07:16:40.533: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", 
Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438998, loc:(*time.Location)(0x7b57be0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-555f47f5d7\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 18 07:16:42.538: INFO: all replica sets need to contain the pod-template-hash label +Jun 18 07:16:42.538: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696439001, loc:(*time.Location)(0x7b57be0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-555f47f5d7\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 18 07:16:44.612: INFO: all replica sets need to contain the pod-template-hash label +Jun 18 07:16:44.612: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, 
Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696439001, loc:(*time.Location)(0x7b57be0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-555f47f5d7\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 18 07:16:46.543: INFO: all replica sets need to contain the pod-template-hash label +Jun 18 07:16:46.543: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696439001, loc:(*time.Location)(0x7b57be0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-555f47f5d7\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 18 07:16:48.538: INFO: all replica sets need to contain the pod-template-hash label +Jun 18 07:16:48.538: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, 
LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696439001, loc:(*time.Location)(0x7b57be0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-555f47f5d7\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 18 07:16:50.539: INFO: all replica sets need to contain the pod-template-hash label +Jun 18 07:16:50.539: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696439001, loc:(*time.Location)(0x7b57be0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696438995, loc:(*time.Location)(0x7b57be0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-555f47f5d7\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 18 07:16:52.542: INFO: +Jun 18 07:16:52.542: INFO: Ensure that both old replica sets have no replicas +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59 +Jun 18 07:16:52.550: INFO: Deployment "test-rollover-deployment": 
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment,GenerateName:,Namespace:e2e-tests-deployment-qks7l,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-qks7l/deployments/test-rollover-deployment,UID:01b25be6-9199-11e9-8cfd-00163e000a67,ResourceVersion:13523705,Generation:2,CreationTimestamp:2019-06-18 07:16:35 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-06-18 07:16:35 +0000 UTC 2019-06-18 07:16:35 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-18 07:16:51 +0000 UTC 2019-06-18 07:16:35 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rollover-deployment-555f47f5d7" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},} + +Jun 18 07:16:52.553: INFO: New ReplicaSet "test-rollover-deployment-555f47f5d7" of Deployment "test-rollover-deployment": +&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-555f47f5d7,GenerateName:,Namespace:e2e-tests-deployment-qks7l,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-qks7l/replicasets/test-rollover-deployment-555f47f5d7,UID:02e9d44a-9199-11e9-8cfd-00163e000a67,ResourceVersion:13523696,Generation:2,CreationTimestamp:2019-06-18 07:16:37 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: 
rollover-pod,pod-template-hash: 555f47f5d7,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 01b25be6-9199-11e9-8cfd-00163e000a67 0xc001672d27 0xc001672d28}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 555f47f5d7,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 555f47f5d7,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},} +Jun 18 07:16:52.553: INFO: All old ReplicaSets of Deployment "test-rollover-deployment": +Jun 18 07:16:52.553: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-controller,GenerateName:,Namespace:e2e-tests-deployment-qks7l,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-qks7l/replicasets/test-rollover-controller,UID:fd7bbd74-9198-11e9-8cfd-00163e000a67,ResourceVersion:13523704,Generation:2,CreationTimestamp:2019-06-18 07:16:28 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 01b25be6-9199-11e9-8cfd-00163e000a67 0xc001672c67 0xc001672c68}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: 
nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +Jun 18 07:16:52.553: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-6586df867b,GenerateName:,Namespace:e2e-tests-deployment-qks7l,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-qks7l/replicasets/test-rollover-deployment-6586df867b,UID:01b5f581-9199-11e9-8cfd-00163e000a67,ResourceVersion:13523569,Generation:2,CreationTimestamp:2019-06-18 07:16:35 +0000 
UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6586df867b,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 01b25be6-9199-11e9-8cfd-00163e000a67 0xc001672de7 0xc001672de8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 6586df867b,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6586df867b,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +Jun 18 07:16:52.558: INFO: Pod "test-rollover-deployment-555f47f5d7-7gl4k" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-555f47f5d7-7gl4k,GenerateName:test-rollover-deployment-555f47f5d7-,Namespace:e2e-tests-deployment-qks7l,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qks7l/pods/test-rollover-deployment-555f47f5d7-7gl4k,UID:02ee1fb4-9199-11e9-8cfd-00163e000a67,ResourceVersion:13523588,Generation:0,CreationTimestamp:2019-06-18 07:16:37 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 555f47f5d7,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-rollover-deployment-555f47f5d7 02e9d44a-9199-11e9-8cfd-00163e000a67 0xc001673997 0xc001673998}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-n5w9c {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-n5w9c,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis 
reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [{default-token-n5w9c true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001673a20} {node.kubernetes.io/unreachable Exists NoExecute 0xc001673a40}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:16:37 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:16:41 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:16:41 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:16:37 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.155,PodIP:171.171.33.185,StartTime:2019-06-18 07:16:37 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-06-18 07:16:40 +0000 UTC,} nil} {nil nil nil} true 0 reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://reg.kpaas.io/kubernetes-e2e-test-images/redis@sha256:2238f5a02d2648d41cc94a88f084060fbfa860890220328eb92696bf2ac649c9 docker://482c70871be6750b7a29f16ee6c7633704196d1f60df9014ca471f8edb7d2430}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +[AfterEach] 
[sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:16:52.558: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-deployment-qks7l" for this suite. +Jun 18 07:17:00.588: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:17:01.568: INFO: namespace: e2e-tests-deployment-qks7l, resource: bindings, ignored listing per whitelist +Jun 18 07:17:01.573: INFO: namespace e2e-tests-deployment-qks7l deletion completed in 9.010463443s + +• [SLOW TEST:34.928 seconds] +[sig-apps] Deployment +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + deployment should support rollover [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:17:01.573: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-7wh5m +STEP: Waiting for a default 
service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +Jun 18 07:17:02.529: INFO: Waiting up to 5m0s for pod "downwardapi-volume-11496a28-9199-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-7wh5m" to be "success or failure" +Jun 18 07:17:02.534: INFO: Pod "downwardapi-volume-11496a28-9199-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 5.41571ms +Jun 18 07:17:04.538: INFO: Pod "downwardapi-volume-11496a28-9199-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009408293s +Jun 18 07:17:06.542: INFO: Pod "downwardapi-volume-11496a28-9199-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.012945556s +STEP: Saw pod success +Jun 18 07:17:06.542: INFO: Pod "downwardapi-volume-11496a28-9199-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:17:06.545: INFO: Trying to get logs from node node5 pod downwardapi-volume-11496a28-9199-11e9-bbf5-0e74dabf3615 container client-container: +STEP: delete the pod +Jun 18 07:17:06.559: INFO: Waiting for pod downwardapi-volume-11496a28-9199-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:17:06.562: INFO: Pod downwardapi-volume-11496a28-9199-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:17:06.562: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-7wh5m" for this suite. +Jun 18 07:17:14.575: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:17:16.564: INFO: namespace: e2e-tests-projected-7wh5m, resource: bindings, ignored listing per whitelist +Jun 18 07:17:16.571: INFO: namespace e2e-tests-projected-7wh5m deletion completed in 10.005223562s + +• [SLOW TEST:14.998 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run pod + should create a pod from an image when restart is Never [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:17:16.571: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-wtkpn +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl run pod + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1527 +[It] should create a pod from an image when restart is Never [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 18 07:17:17.518: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 run e2e-test-nginx-pod --restart=Never --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=e2e-tests-kubectl-wtkpn' +Jun 18 07:17:17.620: INFO: stderr: "" +Jun 18 07:17:17.620: INFO: stdout: "pod/e2e-test-nginx-pod created\n" +STEP: verifying the pod e2e-test-nginx-pod was created +[AfterEach] [k8s.io] Kubectl run pod + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1532 +Jun 18 07:17:17.623: INFO: 
Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete pods e2e-test-nginx-pod --namespace=e2e-tests-kubectl-wtkpn' +Jun 18 07:17:20.955: INFO: stderr: "" +Jun 18 07:17:20.955: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:17:20.955: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-wtkpn" for this suite. +Jun 18 07:17:28.968: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:17:29.011: INFO: namespace: e2e-tests-kubectl-wtkpn, resource: bindings, ignored listing per whitelist +Jun 18 07:17:29.278: INFO: namespace e2e-tests-kubectl-wtkpn deletion completed in 8.320007807s + +• [SLOW TEST:12.707 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl run pod + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create a pod from an image when restart is Never [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] StatefulSet + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:17:29.278: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename statefulset +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-statefulset-gxn9t +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace e2e-tests-statefulset-gxn9t +[It] should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a new StaefulSet +Jun 18 07:17:30.533: INFO: Found 0 stateful pods, waiting for 3 +Jun 18 07:17:40.536: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Jun 18 07:17:40.536: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +Jun 18 07:17:40.536: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Updating stateful set template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine +Jun 18 07:17:40.571: INFO: Updating stateful set ss2 +STEP: Creating a new revision +STEP: Not applying an update when the partition is greater than the number of replicas 
+STEP: Performing a canary update +Jun 18 07:17:50.605: INFO: Updating stateful set ss2 +Jun 18 07:17:50.614: INFO: Waiting for Pod e2e-tests-statefulset-gxn9t/ss2-2 to have revision ss2-c79899b9 update revision ss2-787997d666 +STEP: Restoring Pods to the correct revision when they are deleted +Jun 18 07:18:00.645: INFO: Found 1 stateful pods, waiting for 3 +Jun 18 07:18:10.649: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Jun 18 07:18:10.649: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +Jun 18 07:18:10.649: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Performing a phased rolling update +Jun 18 07:18:10.674: INFO: Updating stateful set ss2 +Jun 18 07:18:10.678: INFO: Waiting for Pod e2e-tests-statefulset-gxn9t/ss2-1 to have revision ss2-c79899b9 update revision ss2-787997d666 +Jun 18 07:18:20.683: INFO: Waiting for Pod e2e-tests-statefulset-gxn9t/ss2-1 to have revision ss2-c79899b9 update revision ss2-787997d666 +Jun 18 07:18:30.703: INFO: Updating stateful set ss2 +Jun 18 07:18:30.708: INFO: Waiting for StatefulSet e2e-tests-statefulset-gxn9t/ss2 to complete update +Jun 18 07:18:30.708: INFO: Waiting for Pod e2e-tests-statefulset-gxn9t/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666 +Jun 18 07:18:40.714: INFO: Waiting for StatefulSet e2e-tests-statefulset-gxn9t/ss2 to complete update +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +Jun 18 07:18:50.719: INFO: Deleting all statefulset in ns e2e-tests-statefulset-gxn9t +Jun 18 07:18:50.721: INFO: Scaling statefulset ss2 to 0 +Jun 18 07:19:20.731: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 18 07:19:20.734: INFO: Deleting statefulset ss2 +[AfterEach] [sig-apps] 
StatefulSet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:19:20.747: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-statefulset-gxn9t" for this suite. +Jun 18 07:19:29.520: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:19:29.584: INFO: namespace: e2e-tests-statefulset-gxn9t, resource: bindings, ignored listing per whitelist +Jun 18 07:19:30.517: INFO: namespace e2e-tests-statefulset-gxn9t deletion completed in 9.76539278s + +• [SLOW TEST:121.239 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with configmap pod with mountPath of existing file [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:19:30.518: INFO: >>> kubeConfig: 
/tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename subpath +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-subpath-b5b7g +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with configmap pod with mountPath of existing file [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod pod-subpath-test-configmap-bz4w +STEP: Creating a pod to test atomic-volume-subpath +Jun 18 07:19:31.516: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-bz4w" in namespace "e2e-tests-subpath-b5b7g" to be "success or failure" +Jun 18 07:19:31.518: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Pending", Reason="", readiness=false. Elapsed: 2.503454ms +Jun 18 07:19:33.521: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Pending", Reason="", readiness=false. Elapsed: 2.005297874s +Jun 18 07:19:35.525: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Running", Reason="", readiness=false. Elapsed: 4.009044627s +Jun 18 07:19:37.529: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Running", Reason="", readiness=false. Elapsed: 6.013421381s +Jun 18 07:19:39.535: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Running", Reason="", readiness=false. Elapsed: 8.01959696s +Jun 18 07:19:41.540: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Running", Reason="", readiness=false. Elapsed: 10.024608532s +Jun 18 07:19:43.549: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Running", Reason="", readiness=false. 
Elapsed: 12.033421191s +Jun 18 07:19:45.552: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Running", Reason="", readiness=false. Elapsed: 14.03595323s +Jun 18 07:19:47.554: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Running", Reason="", readiness=false. Elapsed: 16.038804193s +Jun 18 07:19:49.557: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Running", Reason="", readiness=false. Elapsed: 18.04166206s +Jun 18 07:19:51.560: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Running", Reason="", readiness=false. Elapsed: 20.044365675s +Jun 18 07:19:53.563: INFO: Pod "pod-subpath-test-configmap-bz4w": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.047489416s +STEP: Saw pod success +Jun 18 07:19:53.563: INFO: Pod "pod-subpath-test-configmap-bz4w" satisfied condition "success or failure" +Jun 18 07:19:53.567: INFO: Trying to get logs from node node5 pod pod-subpath-test-configmap-bz4w container test-container-subpath-configmap-bz4w: +STEP: delete the pod +Jun 18 07:19:53.595: INFO: Waiting for pod pod-subpath-test-configmap-bz4w to disappear +Jun 18 07:19:53.598: INFO: Pod pod-subpath-test-configmap-bz4w no longer exists +STEP: Deleting pod pod-subpath-test-configmap-bz4w +Jun 18 07:19:53.598: INFO: Deleting pod "pod-subpath-test-configmap-bz4w" in namespace "e2e-tests-subpath-b5b7g" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:19:53.602: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-subpath-b5b7g" for this suite. 
+Jun 18 07:20:01.626: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:20:01.768: INFO: namespace: e2e-tests-subpath-b5b7g, resource: bindings, ignored listing per whitelist +Jun 18 07:20:02.548: INFO: namespace e2e-tests-subpath-b5b7g deletion completed in 8.942846085s + +• [SLOW TEST:32.030 seconds] +[sig-storage] Subpath +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with configmap pod with mountPath of existing file [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] Secrets + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:20:02.548: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-d77p9 +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name s-test-opt-del-7d287857-9199-11e9-bbf5-0e74dabf3615 +STEP: Creating secret with name s-test-opt-upd-7d2878be-9199-11e9-bbf5-0e74dabf3615 +STEP: Creating the pod +STEP: Deleting secret s-test-opt-del-7d287857-9199-11e9-bbf5-0e74dabf3615 +STEP: Updating secret s-test-opt-upd-7d2878be-9199-11e9-bbf5-0e74dabf3615 +STEP: Creating secret with name s-test-opt-create-7d2879cf-9199-11e9-bbf5-0e74dabf3615 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:21:08.698: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-d77p9" for this suite. +Jun 18 07:21:37.563: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:21:39.655: INFO: namespace: e2e-tests-secrets-d77p9, resource: bindings, ignored listing per whitelist +Jun 18 07:21:39.680: INFO: namespace e2e-tests-secrets-d77p9 deletion completed in 30.978929891s + +• [SLOW TEST:97.132 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's cpu limit [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:21:39.680: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-fxsz6 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +Jun 18 07:21:41.532: INFO: Waiting up to 5m0s for pod "downwardapi-volume-b808998b-9199-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-fxsz6" to be "success or failure" +Jun 18 07:21:41.546: INFO: Pod "downwardapi-volume-b808998b-9199-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 13.566122ms +Jun 18 07:21:43.555: INFO: Pod "downwardapi-volume-b808998b-9199-11e9-bbf5-0e74dabf3615": Phase="Running", Reason="", readiness=true. Elapsed: 2.0228399s +Jun 18 07:21:45.558: INFO: Pod "downwardapi-volume-b808998b-9199-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02559893s +STEP: Saw pod success +Jun 18 07:21:45.558: INFO: Pod "downwardapi-volume-b808998b-9199-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:21:45.560: INFO: Trying to get logs from node node5 pod downwardapi-volume-b808998b-9199-11e9-bbf5-0e74dabf3615 container client-container: +STEP: delete the pod +Jun 18 07:21:45.574: INFO: Waiting for pod downwardapi-volume-b808998b-9199-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:21:45.576: INFO: Pod downwardapi-volume-b808998b-9199-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:21:45.576: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-fxsz6" for this suite. +Jun 18 07:21:53.594: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:21:54.513: INFO: namespace: e2e-tests-projected-fxsz6, resource: bindings, ignored listing per whitelist +Jun 18 07:21:54.517: INFO: namespace e2e-tests-projected-fxsz6 deletion completed in 8.936108554s + +• [SLOW TEST:14.837 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:21:54.518: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-hlt6c +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name configmap-test-volume-bfe37f8b-9199-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume configMaps +Jun 18 07:21:54.695: INFO: Waiting up to 5m0s for pod "pod-configmaps-bfe3e35a-9199-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-configmap-hlt6c" to be "success or failure" +Jun 18 07:21:54.699: INFO: Pod "pod-configmaps-bfe3e35a-9199-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 3.568379ms +Jun 18 07:21:56.701: INFO: Pod "pod-configmaps-bfe3e35a-9199-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.005849554s +Jun 18 07:21:58.707: INFO: Pod "pod-configmaps-bfe3e35a-9199-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.011315833s +STEP: Saw pod success +Jun 18 07:21:58.707: INFO: Pod "pod-configmaps-bfe3e35a-9199-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:21:58.710: INFO: Trying to get logs from node node5 pod pod-configmaps-bfe3e35a-9199-11e9-bbf5-0e74dabf3615 container configmap-volume-test: +STEP: delete the pod +Jun 18 07:21:58.732: INFO: Waiting for pod pod-configmaps-bfe3e35a-9199-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:21:58.734: INFO: Pod pod-configmaps-bfe3e35a-9199-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:21:58.734: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-hlt6c" for this suite. +Jun 18 07:22:06.749: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:22:06.779: INFO: namespace: e2e-tests-configmap-hlt6c, resource: bindings, ignored listing per whitelist +Jun 18 07:22:07.621: INFO: namespace e2e-tests-configmap-hlt6c deletion completed in 8.883859099s + +• [SLOW TEST:13.103 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run rc + should create an rc from an image [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:22:07.621: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-49l4v +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl run rc + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1298 +[It] should create an rc from an image [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 18 07:22:07.789: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=e2e-tests-kubectl-49l4v' +Jun 18 07:22:08.537: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. 
Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +Jun 18 07:22:08.537: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n" +STEP: verifying the rc e2e-test-nginx-rc was created +STEP: verifying the pod controlled by rc e2e-test-nginx-rc was created +STEP: confirm that you can get logs from an rc +Jun 18 07:22:08.559: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [e2e-test-nginx-rc-gq4vf] +Jun 18 07:22:08.559: INFO: Waiting up to 5m0s for pod "e2e-test-nginx-rc-gq4vf" in namespace "e2e-tests-kubectl-49l4v" to be "running and ready" +Jun 18 07:22:08.560: INFO: Pod "e2e-test-nginx-rc-gq4vf": Phase="Pending", Reason="", readiness=false. Elapsed: 1.537147ms +Jun 18 07:22:10.565: INFO: Pod "e2e-test-nginx-rc-gq4vf": Phase="Running", Reason="", readiness=true. Elapsed: 2.005892952s +Jun 18 07:22:10.565: INFO: Pod "e2e-test-nginx-rc-gq4vf" satisfied condition "running and ready" +Jun 18 07:22:10.565: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [e2e-test-nginx-rc-gq4vf] +Jun 18 07:22:10.565: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 logs rc/e2e-test-nginx-rc --namespace=e2e-tests-kubectl-49l4v' +Jun 18 07:22:10.658: INFO: stderr: "" +Jun 18 07:22:10.658: INFO: stdout: "" +[AfterEach] [k8s.io] Kubectl run rc + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1303 +Jun 18 07:22:10.658: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete rc e2e-test-nginx-rc --namespace=e2e-tests-kubectl-49l4v' +Jun 18 07:22:10.738: INFO: stderr: "" +Jun 18 07:22:10.739: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:22:10.739: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-49l4v" for this suite. 
+Jun 18 07:22:19.533: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:22:20.733: INFO: namespace: e2e-tests-kubectl-49l4v, resource: bindings, ignored listing per whitelist +Jun 18 07:22:21.617: INFO: namespace e2e-tests-kubectl-49l4v deletion completed in 10.874720783s + +• [SLOW TEST:13.996 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl run rc + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create an rc from an image [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-apps] ReplicationController + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] ReplicationController + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:22:21.617: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename replication-controller +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-replication-controller-xbktr +STEP: Waiting for a default service account to be provisioned in namespace +[It] should serve a basic image on each replica with a public image [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating replication controller my-hostname-basic-d078f349-9199-11e9-bbf5-0e74dabf3615 +Jun 18 07:22:22.520: INFO: Pod name my-hostname-basic-d078f349-9199-11e9-bbf5-0e74dabf3615: Found 0 pods out of 1 +Jun 18 07:22:27.524: INFO: Pod name my-hostname-basic-d078f349-9199-11e9-bbf5-0e74dabf3615: Found 1 pods out of 1 +Jun 18 07:22:27.525: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-d078f349-9199-11e9-bbf5-0e74dabf3615" are running +Jun 18 07:22:27.527: INFO: Pod "my-hostname-basic-d078f349-9199-11e9-bbf5-0e74dabf3615-5dldf" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-18 07:22:22 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-18 07:22:26 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-18 07:22:26 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-18 07:22:22 +0000 UTC Reason: Message:}]) +Jun 18 07:22:27.527: INFO: Trying to dial the pod +Jun 18 07:22:32.536: INFO: Controller my-hostname-basic-d078f349-9199-11e9-bbf5-0e74dabf3615: Got expected result from replica 1 [my-hostname-basic-d078f349-9199-11e9-bbf5-0e74dabf3615-5dldf]: "my-hostname-basic-d078f349-9199-11e9-bbf5-0e74dabf3615-5dldf", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicationController + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:22:32.536: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-replication-controller-xbktr" for 
this suite. +Jun 18 07:22:42.550: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:22:42.559: INFO: namespace: e2e-tests-replication-controller-xbktr, resource: bindings, ignored listing per whitelist +Jun 18 07:22:42.865: INFO: namespace e2e-tests-replication-controller-xbktr deletion completed in 10.324823453s + +• [SLOW TEST:21.248 seconds] +[sig-apps] ReplicationController +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Proxy server + should support --unix-socket=/path [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:22:42.865: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-fmjvg +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should support --unix-socket=/path [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Starting the proxy +Jun 18 07:22:43.518: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-656024001 proxy --unix-socket=/tmp/kubectl-proxy-unix367965740/test' +STEP: retrieving proxy /api/ output +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:22:43.581: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-fmjvg" for this suite. +Jun 18 07:22:51.603: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:22:51.641: INFO: namespace: e2e-tests-kubectl-fmjvg, resource: bindings, ignored listing per whitelist +Jun 18 07:22:52.618: INFO: namespace e2e-tests-kubectl-fmjvg deletion completed in 9.031140792s + +• [SLOW TEST:9.752 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Proxy server + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should support --unix-socket=/path [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] 
[sig-storage] EmptyDir volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:22:52.618: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-8hvtp +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0666 on tmpfs +Jun 18 07:22:53.524: INFO: Waiting up to 5m0s for pod "pod-e2f31ee4-9199-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-8hvtp" to be "success or failure" +Jun 18 07:22:53.526: INFO: Pod "pod-e2f31ee4-9199-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.241704ms +Jun 18 07:22:55.529: INFO: Pod "pod-e2f31ee4-9199-11e9-bbf5-0e74dabf3615": Phase="Running", Reason="", readiness=true. Elapsed: 2.005060478s +Jun 18 07:22:57.532: INFO: Pod "pod-e2f31ee4-9199-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.007486289s +STEP: Saw pod success +Jun 18 07:22:57.532: INFO: Pod "pod-e2f31ee4-9199-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:22:57.534: INFO: Trying to get logs from node node5 pod pod-e2f31ee4-9199-11e9-bbf5-0e74dabf3615 container test-container: +STEP: delete the pod +Jun 18 07:22:57.549: INFO: Waiting for pod pod-e2f31ee4-9199-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:22:57.552: INFO: Pod pod-e2f31ee4-9199-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:22:57.552: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-8hvtp" for this suite. +Jun 18 07:23:05.564: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:23:05.636: INFO: namespace: e2e-tests-emptydir-8hvtp, resource: bindings, ignored listing per whitelist +Jun 18 07:23:06.523: INFO: namespace e2e-tests-emptydir-8hvtp deletion completed in 8.96844797s + +• [SLOW TEST:13.906 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-api-machinery] Watchers + should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Watchers + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:23:06.524: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename watch +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-watch-l8cbw +STEP: Waiting for a default service account to be provisioned in namespace +[It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating a watch on configmaps with a certain label +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: changing the label value of the configmap +STEP: Expecting to observe a delete notification for the watched object +Jun 18 07:23:07.545: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-l8cbw,SelfLink:/api/v1/namespaces/e2e-tests-watch-l8cbw/configmaps/e2e-watch-test-label-changed,UID:eacfca1e-9199-11e9-8cfd-00163e000a67,ResourceVersion:13526100,Generation:0,CreationTimestamp:2019-06-18 07:23:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +Jun 18 07:23:07.545: INFO: Got : MODIFIED 
&ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-l8cbw,SelfLink:/api/v1/namespaces/e2e-tests-watch-l8cbw/configmaps/e2e-watch-test-label-changed,UID:eacfca1e-9199-11e9-8cfd-00163e000a67,ResourceVersion:13526101,Generation:0,CreationTimestamp:2019-06-18 07:23:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},} +Jun 18 07:23:07.545: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-l8cbw,SelfLink:/api/v1/namespaces/e2e-tests-watch-l8cbw/configmaps/e2e-watch-test-label-changed,UID:eacfca1e-9199-11e9-8cfd-00163e000a67,ResourceVersion:13526102,Generation:0,CreationTimestamp:2019-06-18 07:23:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},} +STEP: modifying the configmap a second time +STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements +STEP: changing the label value of the configmap back +STEP: modifying the configmap a third time +STEP: deleting the configmap +STEP: Expecting to observe an add notification for the watched object when the label value was restored +Jun 18 07:23:17.580: INFO: Got : ADDED 
&ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-l8cbw,SelfLink:/api/v1/namespaces/e2e-tests-watch-l8cbw/configmaps/e2e-watch-test-label-changed,UID:eacfca1e-9199-11e9-8cfd-00163e000a67,ResourceVersion:13526146,Generation:0,CreationTimestamp:2019-06-18 07:23:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +Jun 18 07:23:17.580: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-l8cbw,SelfLink:/api/v1/namespaces/e2e-tests-watch-l8cbw/configmaps/e2e-watch-test-label-changed,UID:eacfca1e-9199-11e9-8cfd-00163e000a67,ResourceVersion:13526147,Generation:0,CreationTimestamp:2019-06-18 07:23:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},} +Jun 18 07:23:17.580: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-l8cbw,SelfLink:/api/v1/namespaces/e2e-tests-watch-l8cbw/configmaps/e2e-watch-test-label-changed,UID:eacfca1e-9199-11e9-8cfd-00163e000a67,ResourceVersion:13526148,Generation:0,CreationTimestamp:2019-06-18 07:23:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: 
label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},} +[AfterEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:23:17.580: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-watch-l8cbw" for this suite. +Jun 18 07:23:25.595: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:23:25.657: INFO: namespace: e2e-tests-watch-l8cbw, resource: bindings, ignored listing per whitelist +Jun 18 07:23:25.907: INFO: namespace e2e-tests-watch-l8cbw deletion completed in 8.323975661s + +• [SLOW TEST:19.384 seconds] +[sig-api-machinery] Watchers +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Downward API + should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-node] Downward API + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:23:25.907: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 
+STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-w92q2 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward api env vars +Jun 18 07:23:26.515: INFO: Waiting up to 5m0s for pod "downward-api-f69e44d5-9199-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-w92q2" to be "success or failure" +Jun 18 07:23:26.519: INFO: Pod "downward-api-f69e44d5-9199-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 4.360655ms +Jun 18 07:23:28.523: INFO: Pod "downward-api-f69e44d5-9199-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007921407s +Jun 18 07:23:30.525: INFO: Pod "downward-api-f69e44d5-9199-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.010355331s +STEP: Saw pod success +Jun 18 07:23:30.525: INFO: Pod "downward-api-f69e44d5-9199-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:23:30.527: INFO: Trying to get logs from node node5 pod downward-api-f69e44d5-9199-11e9-bbf5-0e74dabf3615 container dapi-container: +STEP: delete the pod +Jun 18 07:23:30.571: INFO: Waiting for pod downward-api-f69e44d5-9199-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:23:30.573: INFO: Pod downward-api-f69e44d5-9199-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-node] Downward API + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:23:30.573: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-w92q2" for this suite. +Jun 18 07:23:38.589: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:23:39.633: INFO: namespace: e2e-tests-downward-api-w92q2, resource: bindings, ignored listing per whitelist +Jun 18 07:23:40.648: INFO: namespace e2e-tests-downward-api-w92q2 deletion completed in 10.072026506s + +• [SLOW TEST:14.741 seconds] +[sig-node] Downward API +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38 + should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run default + should create an rc or deployment from an image [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:23:40.649: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-jtrh2 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl run default + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1262 +[It] should create an rc or deployment from an image [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 18 07:23:41.704: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --namespace=e2e-tests-kubectl-jtrh2' +Jun 18 07:23:42.531: INFO: stderr: "kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. 
Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +Jun 18 07:23:42.531: INFO: stdout: "deployment.apps/e2e-test-nginx-deployment created\n" +STEP: verifying the pod controlled by e2e-test-nginx-deployment gets created +[AfterEach] [k8s.io] Kubectl run default + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1268 +Jun 18 07:23:44.543: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete deployment e2e-test-nginx-deployment --namespace=e2e-tests-kubectl-jtrh2' +Jun 18 07:23:44.787: INFO: stderr: "" +Jun 18 07:23:44.787: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:23:44.787: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-jtrh2" for this suite. 
+Jun 18 07:23:51.521: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:23:51.609: INFO: namespace: e2e-tests-kubectl-jtrh2, resource: bindings, ignored listing per whitelist +Jun 18 07:23:52.516: INFO: namespace e2e-tests-kubectl-jtrh2 deletion completed in 7.724035564s + +• [SLOW TEST:11.868 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl run default + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create an rc or deployment from an image [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should delete RS created by deployment when not orphaning [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:23:52.517: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename gc +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-nshxx +STEP: Waiting for a default service account to be provisioned in namespace +[It] should delete RS created by deployment when not orphaning [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the deployment +STEP: Wait for the Deployment to create new ReplicaSet +STEP: delete the deployment +STEP: wait for all rs to be garbage collected +STEP: expected 0 rs, got 1 rs +STEP: expected 0 pods, got 2 pods +STEP: Gathering metrics +W0618 07:23:55.552893 16 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled. +Jun 18 07:23:55.552: INFO: For apiserver_request_count: +For apiserver_request_latencies_summary: +For etcd_helper_cache_entry_count: +For etcd_helper_cache_hit_count: +For etcd_helper_cache_miss_count: +For etcd_request_cache_add_latencies_summary: +For etcd_request_cache_get_latencies_summary: +For etcd_request_latencies_summary: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:23:55.552: INFO: Waiting up to 3m0s for all (but 0) nodes to 
be ready +STEP: Destroying namespace "e2e-tests-gc-nshxx" for this suite. +Jun 18 07:24:03.573: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:24:03.617: INFO: namespace: e2e-tests-gc-nshxx, resource: bindings, ignored listing per whitelist +Jun 18 07:24:04.553: INFO: namespace e2e-tests-gc-nshxx deletion completed in 8.99470574s + +• [SLOW TEST:12.036 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should delete RS created by deployment when not orphaning [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSS +------------------------------ +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute prestop exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:24:04.553: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-lifecycle-hook-zmwd8 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61 +STEP: create the container to handle the HTTPGet hook request. +[It] should execute prestop exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the pod with lifecycle hook +STEP: delete the pod with lifecycle hook +Jun 18 07:24:09.571: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Jun 18 07:24:09.575: INFO: Pod pod-with-prestop-exec-hook still exists +Jun 18 07:24:11.575: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Jun 18 07:24:11.579: INFO: Pod pod-with-prestop-exec-hook still exists +Jun 18 07:24:13.575: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Jun 18 07:24:13.580: INFO: Pod pod-with-prestop-exec-hook still exists +Jun 18 07:24:15.575: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Jun 18 07:24:15.578: INFO: Pod pod-with-prestop-exec-hook still exists +Jun 18 07:24:17.575: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Jun 18 07:24:17.578: INFO: Pod pod-with-prestop-exec-hook still exists +Jun 18 07:24:19.575: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Jun 18 07:24:19.580: INFO: Pod pod-with-prestop-exec-hook still exists +Jun 18 07:24:21.575: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Jun 18 07:24:21.578: INFO: Pod pod-with-prestop-exec-hook still exists +Jun 18 07:24:23.575: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Jun 18 07:24:23.578: INFO: Pod pod-with-prestop-exec-hook still exists +Jun 18 07:24:25.575: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Jun 18 07:24:25.578: INFO: Pod pod-with-prestop-exec-hook still exists +Jun 18 07:24:27.575: INFO: Waiting for pod 
pod-with-prestop-exec-hook to disappear +Jun 18 07:24:27.578: INFO: Pod pod-with-prestop-exec-hook still exists +Jun 18 07:24:29.575: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Jun 18 07:24:29.578: INFO: Pod pod-with-prestop-exec-hook no longer exists +STEP: check prestop hook +[AfterEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:24:29.585: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-zmwd8" for this suite. +Jun 18 07:24:53.599: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:24:53.643: INFO: namespace: e2e-tests-container-lifecycle-hook-zmwd8, resource: bindings, ignored listing per whitelist +Jun 18 07:24:54.521: INFO: namespace e2e-tests-container-lifecycle-hook-zmwd8 deletion completed in 24.932491611s + +• [SLOW TEST:49.968 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when create a pod with lifecycle hook + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40 + should execute prestop exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:24:54.522: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename gc +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-6l7p8 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the rc1 +STEP: create the rc2 +STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well +STEP: delete the rc simpletest-rc-to-be-deleted +STEP: wait for the rc to be deleted +STEP: Gathering metrics +W0618 07:25:04.767022 16 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled. 
+Jun 18 07:25:04.767: INFO: For apiserver_request_count: +For apiserver_request_latencies_summary: +For etcd_helper_cache_entry_count: +For etcd_helper_cache_hit_count: +For etcd_helper_cache_miss_count: +For etcd_request_cache_add_latencies_summary: +For etcd_request_cache_get_latencies_summary: +For etcd_request_latencies_summary: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:25:04.767: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-gc-6l7p8" for this suite. 
+Jun 18 07:25:13.531: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:25:13.589: INFO: namespace: e2e-tests-gc-6l7p8, resource: bindings, ignored listing per whitelist +Jun 18 07:25:14.571: INFO: namespace e2e-tests-gc-6l7p8 deletion completed in 9.798962635s + +• [SLOW TEST:20.049 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[k8s.io] Probing container + with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:25:14.571: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename container-probe +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-rvz58 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48 +[It] with readiness probe should not be ready before initial delay and never 
restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +Jun 18 07:25:37.536: INFO: Container started at 2019-06-18 07:25:16 +0000 UTC, pod became ready at 2019-06-18 07:25:36 +0000 UTC +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:25:37.536: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-probe-rvz58" for this suite. +Jun 18 07:26:05.552: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:26:05.681: INFO: namespace: e2e-tests-container-probe-rvz58, resource: bindings, ignored listing per whitelist +Jun 18 07:26:06.522: INFO: namespace e2e-tests-container-probe-rvz58 deletion completed in 28.981153786s + +• [SLOW TEST:51.952 seconds] +[k8s.io] Probing container +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-api-machinery] Garbage collector + should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Garbage collector + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:26:06.522: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename gc +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-42j88 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the rc +STEP: delete the rc +STEP: wait for the rc to be deleted +Jun 18 07:26:14.622: INFO: 5 pods remaining +Jun 18 07:26:14.622: INFO: 0 pods has nil DeletionTimestamp +Jun 18 07:26:14.622: INFO: +STEP: Gathering metrics +W0618 07:26:15.596327 16 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled. 
+Jun 18 07:26:15.596: INFO: For apiserver_request_count: +For apiserver_request_latencies_summary: +For etcd_helper_cache_entry_count: +For etcd_helper_cache_hit_count: +For etcd_helper_cache_miss_count: +For etcd_request_cache_add_latencies_summary: +For etcd_request_cache_get_latencies_summary: +For etcd_request_latencies_summary: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:26:15.596: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-gc-42j88" for this suite. 
+Jun 18 07:26:27.613: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:26:27.673: INFO: namespace: e2e-tests-gc-42j88, resource: bindings, ignored listing per whitelist +Jun 18 07:26:28.603: INFO: namespace e2e-tests-gc-42j88 deletion completed in 13.000674211s + +• [SLOW TEST:22.081 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl describe + should check if kubectl describe prints relevant information for rc and pods [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:26:28.603: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-5kf7w +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should check if kubectl describe prints relevant information for rc and pods 
[Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +Jun 18 07:26:29.621: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 version --client' +Jun 18 07:26:29.687: INFO: stderr: "" +Jun 18 07:26:29.687: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"13\", GitVersion:\"v1.13.5\", GitCommit:\"2166946f41b36dea2c4626f90a77706f426cdea2\", GitTreeState:\"clean\", BuildDate:\"2019-03-25T15:26:52Z\", GoVersion:\"go1.11.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n" +Jun 18 07:26:29.688: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-5kf7w' +Jun 18 07:26:31.534: INFO: stderr: "" +Jun 18 07:26:31.534: INFO: stdout: "replicationcontroller/redis-master created\n" +Jun 18 07:26:31.534: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-5kf7w' +Jun 18 07:26:32.545: INFO: stderr: "" +Jun 18 07:26:32.546: INFO: stdout: "service/redis-master created\n" +STEP: Waiting for Redis master to start. +Jun 18 07:26:33.567: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:26:33.567: INFO: Found 0 / 1 +Jun 18 07:26:34.579: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:26:34.580: INFO: Found 1 / 1 +Jun 18 07:26:34.580: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +Jun 18 07:26:34.583: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:26:34.583: INFO: ForEach: Found 1 pods from the filter. Now looping through them. 
+Jun 18 07:26:34.583: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 describe pod redis-master-kczfw --namespace=e2e-tests-kubectl-5kf7w' +Jun 18 07:26:34.685: INFO: stderr: "" +Jun 18 07:26:34.685: INFO: stdout: "Name: redis-master-kczfw\nNamespace: e2e-tests-kubectl-5kf7w\nPriority: 0\nPriorityClassName: \nNode: node5/192.168.2.155\nStart Time: Tue, 18 Jun 2019 07:26:31 +0000\nLabels: app=redis\n role=master\nAnnotations: \nStatus: Running\nIP: 171.171.33.152\nControlled By: ReplicationController/redis-master\nContainers:\n redis-master:\n Container ID: docker://f38a0511ba77765aa70bf8bfe82182609c41a5ef86dda27ed852aa35c1f5a779\n Image: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0\n Image ID: docker-pullable://reg.kpaas.io/kubernetes-e2e-test-images/redis@sha256:2238f5a02d2648d41cc94a88f084060fbfa860890220328eb92696bf2ac649c9\n Port: 6379/TCP\n Host Port: 0/TCP\n State: Running\n Started: Tue, 18 Jun 2019 07:26:33 +0000\n Ready: True\n Restart Count: 0\n Environment: \n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from default-token-hrtcv (ro)\nConditions:\n Type Status\n Initialized True \n Ready True \n ContainersReady True \n PodScheduled True \nVolumes:\n default-token-hrtcv:\n Type: Secret (a volume populated by a Secret)\n SecretName: default-token-hrtcv\n Optional: false\nQoS Class: BestEffort\nNode-Selectors: \nTolerations: node.kubernetes.io/not-ready:NoExecute for 300s\n node.kubernetes.io/unreachable:NoExecute for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 3s default-scheduler Successfully assigned e2e-tests-kubectl-5kf7w/redis-master-kczfw to node5\n Normal Pulled 1s kubelet, node5 Container image \"reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0\" already present on machine\n Normal Created 1s kubelet, node5 Created container\n Normal Started 1s kubelet, node5 Started container\n" +Jun 18 07:26:34.686: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-656024001 describe rc redis-master --namespace=e2e-tests-kubectl-5kf7w' +Jun 18 07:26:35.549: INFO: stderr: "" +Jun 18 07:26:35.549: INFO: stdout: "Name: redis-master\nNamespace: e2e-tests-kubectl-5kf7w\nSelector: app=redis,role=master\nLabels: app=redis\n role=master\nAnnotations: \nReplicas: 1 current / 1 desired\nPods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n Labels: app=redis\n role=master\n Containers:\n redis-master:\n Image: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0\n Port: 6379/TCP\n Host Port: 0/TCP\n Environment: \n Mounts: \n Volumes: \nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal SuccessfulCreate 4s replication-controller Created pod: redis-master-kczfw\n" +Jun 18 07:26:35.549: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 describe service redis-master --namespace=e2e-tests-kubectl-5kf7w' +Jun 18 07:26:35.638: INFO: stderr: "" +Jun 18 07:26:35.638: INFO: stdout: "Name: redis-master\nNamespace: e2e-tests-kubectl-5kf7w\nLabels: app=redis\n role=master\nAnnotations: \nSelector: app=redis,role=master\nType: ClusterIP\nIP: 169.169.222.202\nPort: 6379/TCP\nTargetPort: redis-server/TCP\nEndpoints: 171.171.33.152:6379\nSession Affinity: None\nEvents: \n" +Jun 18 07:26:35.642: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 describe node master1' +Jun 18 07:26:35.755: INFO: stderr: "" +Jun 18 07:26:35.755: INFO: stdout: "Name: master1\nRoles: ingress,master\nLabels: beta.kubernetes.io/arch=amd64\n beta.kubernetes.io/os=linux\n kubernetes.io/hostname=master1\n node-role.kubernetes.io/ingress=contour\n node-role.kubernetes.io/master=\nAnnotations: csi.volume.kubernetes.io/nodeid: {\"cephfs.csi.ceph.com\":\"master1\",\"rbd.csi.ceph.com\":\"master1\"}\n kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock\n node.alpha.kubernetes.io/ttl: 0\n volumes.kubernetes.io/controller-managed-attach-detach: 
true\nCreationTimestamp: Tue, 14 May 2019 05:31:27 +0000\nTaints: node-role.kubernetes.io/master:NoSchedule\nUnschedulable: false\nConditions:\n Type Status LastHeartbeatTime LastTransitionTime Reason Message\n ---- ------ ----------------- ------------------ ------ -------\n MemoryPressure False Tue, 18 Jun 2019 07:26:27 +0000 Tue, 14 May 2019 05:31:18 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available\n DiskPressure False Tue, 18 Jun 2019 07:26:27 +0000 Sat, 15 Jun 2019 19:51:22 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure\n PIDPressure False Tue, 18 Jun 2019 07:26:27 +0000 Tue, 14 May 2019 05:31:18 +0000 KubeletHasSufficientPID kubelet has sufficient PID available\n Ready True Tue, 18 Jun 2019 07:26:27 +0000 Tue, 14 May 2019 05:34:07 +0000 KubeletReady kubelet is posting ready status\nAddresses:\n InternalIP: 192.168.2.150\n Hostname: master1\nCapacity:\n cpu: 4\n ephemeral-storage: 51473020Ki\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 8010424Ki\n pods: 110\nAllocatable:\n cpu: 4\n ephemeral-storage: 47437535154\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 7908024Ki\n pods: 110\nSystem Info:\n Machine ID: 36dc953195bb4389870d2591ff6bfea5\n System UUID: 5054313B-2207-4ABA-B4EA-AD411C27A950\n Boot ID: 9ef472d2-f035-4e7d-b6d0-62237d5c2196\n Kernel Version: 3.10.0-514.26.2.el7.x86_64\n OS Image: CentOS Linux 7 (Core)\n Operating System: linux\n Architecture: amd64\n Container Runtime Version: docker://17.3.2\n Kubelet Version: v1.13.5\n Kube-Proxy Version: v1.13.5\nPodCIDR: 171.171.0.0/24\nNon-terminated Pods: (27 in total)\n Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE\n --------- ---- ------------ ---------- --------------- ------------- ---\n default csi-cephfs-ceph-csi-cephfs-nodeplugin-4pz8k 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34d\n default csi-rbd-ceph-csi-rbd-nodeplugin-rz5cb 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34d\n kube-system alert-apiserver-5f887ff458-4s6dg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5d\n 
kube-system calico-node-bc6j9 250m (6%) 0 (0%) 0 (0%) 0 (0%) 3d11h\n kube-system contour-mzv5l 0 (0%) 0 (0%) 0 (0%) 0 (0%) 35d\n kube-system coredns-6f8ddbf466-h7ls2 100m (2%) 0 (0%) 70Mi (0%) 170Mi (2%) 35d\n kube-system coredns-6f8ddbf466-ktps8 100m (2%) 0 (0%) 70Mi (0%) 170Mi (2%) 35d\n kube-system etcd-master1 0 (0%) 0 (0%) 0 (0%) 0 (0%) 13d\n kube-system ke-logservice-7bd6c594d9-k8pxn 100m (2%) 100m (2%) 128Mi (1%) 128Mi (1%) 32d\n kube-system ke-promgate-84549b94b4-fzc47 100m (2%) 100m (2%) 128Mi (1%) 128Mi (1%) 32d\n kube-system kube-apiserver-master1 250m (6%) 0 (0%) 0 (0%) 0 (0%) 35d\n kube-system kube-controller-manager-master1 200m (5%) 0 (0%) 0 (0%) 0 (0%) 35d\n kube-system kube-proxy-hw5jr 0 (0%) 0 (0%) 0 (0%) 0 (0%) 35d\n kube-system kube-scheduler-master1 100m (2%) 0 (0%) 0 (0%) 0 (0%) 35d\n kube-system logkit-poc-8hmd2 512m (12%) 512m (12%) 1Gi (13%) 2Gi (26%) 2d23h\n kube-system prometheus-operator-prometheus-node-exporter-h87gt 100m (2%) 1 (25%) 256Mi (3%) 2Gi (26%) 32d\n qce kirk-apiserver-6b877699dc-zhtq6 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22h\n qce kirk-app-controller-6b65f96d94-j7m4p 0 (0%) 0 (0%) 0 (0%) 0 (0%) 19h\n qce qce-authgate-deploy-7dd7b5979b-db5fk 0 (0%) 0 (0%) 0 (0%) 0 (0%) 21d\n qce qce-controller-deploy-74467f54c4-flpqt 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34d\n qce qce-deploy-75846b7d7-n9g8w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14d\n qce qce-hubsync-sts-0 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34d\n qce qce-redis-deploy-74bc95ff86-lszcm 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34d\n qce qce-registry-sts-0 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34d\n qce qce-sonarqube-0 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2d11h\n qce qce-tiller-deploy-b45df78fc-qwpkk 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34d\n qce qce-webhook-deploy-694d74679d-6v7xx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34d\nAllocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n Resource Requests Limits\n -------- -------- ------\n cpu 1812m (45%) 1712m (42%)\n memory 1676Mi (21%) 4692Mi (60%)\n ephemeral-storage 0 (0%) 0 
(0%)\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Warning FreeDiskSpaceFailed 11m kubelet, master1 failed to garbage collect required amount of images. Wanted to free 2434138112 bytes, but freed 2066891309 bytes\n" +Jun 18 07:26:35.755: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 describe namespace e2e-tests-kubectl-5kf7w' +Jun 18 07:26:37.520: INFO: stderr: "" +Jun 18 07:26:37.520: INFO: stdout: "Name: e2e-tests-kubectl-5kf7w\nLabels: e2e-framework=kubectl\n e2e-run=8b392b75-9198-11e9-bbf5-0e74dabf3615\nAnnotations: \nStatus: Active\n\nNo resource quota.\n\nNo resource limits.\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:26:37.520: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-5kf7w" for this suite. +Jun 18 07:27:07.648: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:27:07.683: INFO: namespace: e2e-tests-kubectl-5kf7w, resource: bindings, ignored listing per whitelist +Jun 18 07:27:07.965: INFO: namespace e2e-tests-kubectl-5kf7w deletion completed in 30.375231625s + +• [SLOW TEST:39.362 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl describe + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should check if kubectl describe prints relevant information for rc and pods [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ 
+SSSSSSSSSSSSSSS +------------------------------ +[sig-network] Networking Granular Checks: Pods + should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Networking + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:27:07.965: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename pod-network-test +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pod-network-test-27m97 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-27m97 +STEP: creating a selector +STEP: Creating the service pods in kubernetes +Jun 18 07:27:08.509: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +STEP: Creating test pods +Jun 18 07:27:31.579: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://171.171.33.153:8080/dial?request=hostName&protocol=http&host=171.171.104.18&port=8080&tries=1'] Namespace:e2e-tests-pod-network-test-27m97 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:27:31.579: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:27:31.666: INFO: Waiting for endpoints: map[] +Jun 18 07:27:31.669: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 
'http://171.171.33.153:8080/dial?request=hostName&protocol=http&host=171.171.135.62&port=8080&tries=1'] Namespace:e2e-tests-pod-network-test-27m97 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:27:31.669: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:27:31.747: INFO: Waiting for endpoints: map[] +Jun 18 07:27:31.749: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://171.171.33.153:8080/dial?request=hostName&protocol=http&host=171.171.166.131&port=8080&tries=1'] Namespace:e2e-tests-pod-network-test-27m97 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:27:31.749: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:27:31.855: INFO: Waiting for endpoints: map[] +Jun 18 07:27:31.858: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://171.171.33.153:8080/dial?request=hostName&protocol=http&host=171.171.33.148&port=8080&tries=1'] Namespace:e2e-tests-pod-network-test-27m97 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:27:31.858: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:27:31.929: INFO: Waiting for endpoints: map[] +Jun 18 07:27:31.931: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://171.171.33.153:8080/dial?request=hostName&protocol=http&host=171.171.3.124&port=8080&tries=1'] Namespace:e2e-tests-pod-network-test-27m97 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:27:31.931: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:27:32.005: INFO: Waiting for endpoints: map[] +[AfterEach] [sig-network] Networking + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:27:32.005: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pod-network-test-27m97" for this suite. +Jun 18 07:27:58.525: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:27:58.640: INFO: namespace: e2e-tests-pod-network-test-27m97, resource: bindings, ignored listing per whitelist +Jun 18 07:27:58.839: INFO: namespace e2e-tests-pod-network-test-27m97 deletion completed in 26.829192852s + +• [SLOW TEST:50.874 seconds] +[sig-network] Networking +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25 + Granular Checks: Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28 + should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0777,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:27:58.839: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the 
e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-4mm2z +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0777,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0777 on tmpfs +Jun 18 07:27:59.527: INFO: Waiting up to 5m0s for pod "pod-9956cc5a-919a-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-4mm2z" to be "success or failure" +Jun 18 07:27:59.533: INFO: Pod "pod-9956cc5a-919a-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 6.24104ms +Jun 18 07:28:01.536: INFO: Pod "pod-9956cc5a-919a-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008970576s +STEP: Saw pod success +Jun 18 07:28:01.536: INFO: Pod "pod-9956cc5a-919a-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:28:01.538: INFO: Trying to get logs from node node5 pod pod-9956cc5a-919a-11e9-bbf5-0e74dabf3615 container test-container: +STEP: delete the pod +Jun 18 07:28:01.563: INFO: Waiting for pod pod-9956cc5a-919a-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:28:01.567: INFO: Pod pod-9956cc5a-919a-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:28:01.567: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-4mm2z" for this suite. 
+Jun 18 07:28:09.583: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:28:09.681: INFO: namespace: e2e-tests-emptydir-4mm2z, resource: bindings, ignored listing per whitelist +Jun 18 07:28:10.584: INFO: namespace e2e-tests-emptydir-4mm2z deletion completed in 9.013520929s + +• [SLOW TEST:11.745 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (root,0777,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Update Demo + should create and stop a replication controller [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:28:10.584: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-6nhn4 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Update Demo + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:295 +[It] should create and stop a replication controller [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating a replication controller +Jun 18 07:28:11.528: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-6nhn4' +Jun 18 07:28:11.758: INFO: stderr: "" +Jun 18 07:28:11.758: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Jun 18 07:28:11.758: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-6nhn4' +Jun 18 07:28:11.844: INFO: stderr: "" +Jun 18 07:28:11.844: INFO: stdout: "update-demo-nautilus-p5g55 update-demo-nautilus-x2jbr " +Jun 18 07:28:11.844: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-p5g55 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-6nhn4' +Jun 18 07:28:11.966: INFO: stderr: "" +Jun 18 07:28:11.966: INFO: stdout: "" +Jun 18 07:28:11.966: INFO: update-demo-nautilus-p5g55 is created but not running +Jun 18 07:28:16.966: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-6nhn4' +Jun 18 07:28:17.045: INFO: stderr: "" +Jun 18 07:28:17.045: INFO: stdout: "update-demo-nautilus-p5g55 update-demo-nautilus-x2jbr " +Jun 18 07:28:17.045: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-p5g55 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-6nhn4' +Jun 18 07:28:17.120: INFO: stderr: "" +Jun 18 07:28:17.120: INFO: stdout: "true" +Jun 18 07:28:17.120: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-p5g55 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-6nhn4' +Jun 18 07:28:17.523: INFO: stderr: "" +Jun 18 07:28:17.523: INFO: stdout: "reg.kpaas.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 18 07:28:17.523: INFO: validating pod update-demo-nautilus-p5g55 +Jun 18 07:28:17.527: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 18 07:28:17.527: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 18 07:28:17.527: INFO: update-demo-nautilus-p5g55 is verified up and running +Jun 18 07:28:17.527: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-x2jbr -o template --template={{if (exists . 
"status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-6nhn4' +Jun 18 07:28:17.606: INFO: stderr: "" +Jun 18 07:28:17.606: INFO: stdout: "true" +Jun 18 07:28:17.607: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-x2jbr -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-6nhn4' +Jun 18 07:28:17.689: INFO: stderr: "" +Jun 18 07:28:17.689: INFO: stdout: "reg.kpaas.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 18 07:28:17.689: INFO: validating pod update-demo-nautilus-x2jbr +Jun 18 07:28:17.693: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 18 07:28:17.693: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 18 07:28:17.693: INFO: update-demo-nautilus-x2jbr is verified up and running +STEP: using delete to clean up resources +Jun 18 07:28:17.693: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-6nhn4' +Jun 18 07:28:17.776: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +Jun 18 07:28:17.777: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" +Jun 18 07:28:17.777: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get rc,svc -l name=update-demo --no-headers --namespace=e2e-tests-kubectl-6nhn4' +Jun 18 07:28:18.522: INFO: stderr: "No resources found.\n" +Jun 18 07:28:18.522: INFO: stdout: "" +Jun 18 07:28:18.522: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -l name=update-demo --namespace=e2e-tests-kubectl-6nhn4 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Jun 18 07:28:18.641: INFO: stderr: "" +Jun 18 07:28:18.641: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:28:18.641: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-6nhn4" for this suite. 
+Jun 18 07:28:44.658: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:28:44.690: INFO: namespace: e2e-tests-kubectl-6nhn4, resource: bindings, ignored listing per whitelist +Jun 18 07:28:45.546: INFO: namespace e2e-tests-kubectl-6nhn4 deletion completed in 26.899841838s + +• [SLOW TEST:34.961 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Update Demo + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create and stop a replication controller [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[k8s.io] Pods + should be submitted and removed [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:28:45.546: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename pods +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-7fwsn +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should be submitted and removed [NodeConformance] 
[Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +STEP: setting up watch +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: verifying pod creation was observed +Jun 18 07:28:48.547: INFO: running pod: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-submit-remove-b55bc197-919a-11e9-bbf5-0e74dabf3615", GenerateName:"", Namespace:"e2e-tests-pods-7fwsn", SelfLink:"/api/v1/namespaces/e2e-tests-pods-7fwsn/pods/pod-submit-remove-b55bc197-919a-11e9-bbf5-0e74dabf3615", UID:"b55ce0aa-919a-11e9-8cfd-00163e000a67", ResourceVersion:"13528744", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63696439726, loc:(*time.Location)(0x7b57be0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"time":"519138940", "name":"foo"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-lvjjq", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc00261ba40), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), 
FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil)}}}, InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"nginx", Image:"docker.io/library/nginx:1.14-alpine", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-lvjjq", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc0024b7fd8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"node5", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc0024fd020), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), 
SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc0026d0020)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc0026d0040)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc0026d0048), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc0026d004c)}, Status:v1.PodStatus{Phase:"Running", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696439726, loc:(*time.Location)(0x7b57be0)}}, Reason:"", Message:""}, v1.PodCondition{Type:"Ready", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696439728, loc:(*time.Location)(0x7b57be0)}}, Reason:"", Message:""}, v1.PodCondition{Type:"ContainersReady", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696439728, loc:(*time.Location)(0x7b57be0)}}, Reason:"", Message:""}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696439726, loc:(*time.Location)(0x7b57be0)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"192.168.2.155", PodIP:"171.171.33.150", StartTime:(*v1.Time)(0xc0018beb40), InitContainerStatuses:[]v1.ContainerStatus(nil), ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"nginx", 
State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(0xc0018beb60), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:true, RestartCount:0, Image:"nginx:1.14-alpine", ImageID:"docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7", ContainerID:"docker://5446617e0dcb7e3ae65e0953d07dcd02c82314f66640d66c53c72784aa5d396d"}}, QOSClass:"BestEffort"}} +STEP: deleting the pod gracefully +STEP: verifying the kubelet observed the termination notice +STEP: verifying pod deletion was observed +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:28:58.579: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-7fwsn" for this suite. 
+Jun 18 07:29:06.596: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:29:06.629: INFO: namespace: e2e-tests-pods-7fwsn, resource: bindings, ignored listing per whitelist +Jun 18 07:29:06.918: INFO: namespace e2e-tests-pods-7fwsn deletion completed in 8.330155147s + +• [SLOW TEST:21.372 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be submitted and removed [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should run and stop complex daemon [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:29:06.918: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename daemonsets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-daemonsets-7f52p +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102 +[It] should run and stop complex daemon [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +Jun 18 07:29:07.634: INFO: Creating daemon "daemon-set" with a node selector +STEP: Initially, daemon pods should not be running on any nodes. +Jun 18 07:29:07.642: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:07.642: INFO: Number of running nodes: 0, number of available pods: 0 +STEP: Change node label to blue, check that daemon pod is launched. +Jun 18 07:29:07.658: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:07.658: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:08.660: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:08.660: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:09.660: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:09.660: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:10.660: INFO: Number of nodes with available pods: 1 +Jun 18 07:29:10.660: INFO: Number of running nodes: 1, number of available pods: 1 +STEP: Update the node label to green, and wait for daemons to be unscheduled +Jun 18 07:29:10.674: INFO: Number of nodes with available pods: 1 +Jun 18 07:29:10.674: INFO: Number of running nodes: 0, number of available pods: 1 +Jun 18 07:29:11.678: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:11.678: INFO: Number of running nodes: 0, number of available pods: 0 +STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate +Jun 18 07:29:11.713: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:11.713: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:12.715: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:12.715: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:13.716: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:13.716: INFO: Node node1 is running more than one daemon pod +Jun 
18 07:29:15.543: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:15.543: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:15.716: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:15.716: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:16.716: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:16.716: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:17.716: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:17.716: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:18.718: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:18.718: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:19.716: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:19.716: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:20.716: INFO: Number of nodes with available pods: 1 +Jun 18 07:29:20.716: INFO: Number of running nodes: 1, number of available pods: 1 +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace e2e-tests-daemonsets-7f52p, will wait for the garbage collector to delete the pods +Jun 18 07:29:21.516: INFO: Deleting DaemonSet.extensions daemon-set took: 745.048465ms +Jun 18 07:29:21.616: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.298225ms +Jun 18 07:29:27.619: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:27.619: INFO: Number of running nodes: 0, number of available pods: 0 +Jun 18 07:29:27.622: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-7f52p/daemonsets","resourceVersion":"13528997"},"items":null} + +Jun 18 07:29:27.624: INFO: pods: 
{"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-7f52p/pods","resourceVersion":"13528997"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:29:27.644: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-daemonsets-7f52p" for this suite. +Jun 18 07:29:37.656: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:29:37.669: INFO: namespace: e2e-tests-daemonsets-7f52p, resource: bindings, ignored listing per whitelist +Jun 18 07:29:37.972: INFO: namespace e2e-tests-daemonsets-7f52p deletion completed in 10.324518274s + +• [SLOW TEST:31.054 seconds] +[sig-apps] Daemon set [Serial] +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should run and stop complex daemon [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should run and stop simple daemon [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:29:37.972: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename daemonsets +STEP: Binding the e2e-test-privileged-psp 
PodSecurityPolicy to the default service account in e2e-tests-daemonsets-qbx9x +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102 +[It] should run and stop simple daemon [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating simple DaemonSet "daemon-set" +STEP: Check that daemon pods launch on every node of the cluster. +Jun 18 07:29:38.670: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:38.674: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:38.674: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:39.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:39.683: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:39.683: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:40.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:40.696: INFO: Number of nodes with available pods: 0 +Jun 18 07:29:40.696: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:41.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:41.681: INFO: Number of nodes with available pods: 1 +Jun 18 07:29:41.681: INFO: Node node1 is running more than one daemon pod +Jun 18 07:29:42.678: INFO: 
DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:42.680: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:42.680: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:43.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:43.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:43.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:44.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:44.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:44.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:45.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:45.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:45.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:46.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:46.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:46.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:47.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:47.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:47.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:48.685: INFO: DaemonSet pods can't tolerate node master1 with 
taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:48.689: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:48.689: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:49.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:49.682: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:49.682: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:50.677: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:50.680: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:50.680: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:51.680: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:51.682: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:51.682: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:52.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:52.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:52.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:53.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:53.680: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:53.680: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:54.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master 
Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:54.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:54.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:55.677: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:55.680: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:55.680: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:56.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:56.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:56.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:57.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:57.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:57.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:58.677: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:58.679: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:58.680: INFO: Node node4 is running more than one daemon pod +Jun 18 07:29:59.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:29:59.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:29:59.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:00.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip 
checking this node +Jun 18 07:30:00.698: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:00.698: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:01.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:01.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:01.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:02.683: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:02.690: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:02.690: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:03.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:03.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:03.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:04.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:04.681: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:04.681: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:05.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:05.682: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:05.682: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:06.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:06.680: INFO: 
Number of nodes with available pods: 5 +Jun 18 07:30:06.680: INFO: Number of running nodes: 5, number of available pods: 5 +STEP: Stop a daemon pod, check that the daemon pod is revived. +Jun 18 07:30:06.691: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:06.694: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:06.694: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:07.698: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:07.700: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:07.700: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:08.699: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:08.701: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:08.701: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:09.701: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:09.704: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:09.704: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:10.698: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:10.705: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:10.705: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:11.698: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 
18 07:30:11.700: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:11.700: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:12.697: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:12.700: INFO: Number of nodes with available pods: 4 +Jun 18 07:30:12.700: INFO: Node node4 is running more than one daemon pod +Jun 18 07:30:13.698: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Jun 18 07:30:13.700: INFO: Number of nodes with available pods: 5 +Jun 18 07:30:13.700: INFO: Number of running nodes: 5, number of available pods: 5 +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace e2e-tests-daemonsets-qbx9x, will wait for the garbage collector to delete the pods +Jun 18 07:30:13.759: INFO: Deleting DaemonSet.extensions daemon-set took: 4.521811ms +Jun 18 07:30:14.559: INFO: Terminating DaemonSet.extensions daemon-set pods took: 800.230305ms +Jun 18 07:30:27.513: INFO: Number of nodes with available pods: 0 +Jun 18 07:30:27.513: INFO: Number of running nodes: 0, number of available pods: 0 +Jun 18 07:30:27.516: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-qbx9x/daemonsets","resourceVersion":"13529410"},"items":null} + +Jun 18 07:30:27.520: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-qbx9x/pods","resourceVersion":"13529410"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:30:27.535: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-daemonsets-qbx9x" for this suite. +Jun 18 07:30:37.556: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:30:37.676: INFO: namespace: e2e-tests-daemonsets-qbx9x, resource: bindings, ignored listing per whitelist +Jun 18 07:30:38.518: INFO: namespace e2e-tests-daemonsets-qbx9x deletion completed in 10.980281789s + +• [SLOW TEST:60.546 seconds] +[sig-apps] Daemon set [Serial] +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should run and stop simple daemon [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:30:38.519: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-w57m9 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable 
from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name configmap-test-volume-map-f8b49e7d-919a-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume configMaps +Jun 18 07:30:39.542: INFO: Waiting up to 5m0s for pod "pod-configmaps-f8b88a34-919a-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-configmap-w57m9" to be "success or failure" +Jun 18 07:30:39.549: INFO: Pod "pod-configmaps-f8b88a34-919a-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 7.41864ms +Jun 18 07:30:41.576: INFO: Pod "pod-configmaps-f8b88a34-919a-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.034356653s +STEP: Saw pod success +Jun 18 07:30:41.576: INFO: Pod "pod-configmaps-f8b88a34-919a-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:30:41.580: INFO: Trying to get logs from node node5 pod pod-configmaps-f8b88a34-919a-11e9-bbf5-0e74dabf3615 container configmap-volume-test: +STEP: delete the pod +Jun 18 07:30:41.608: INFO: Waiting for pod pod-configmaps-f8b88a34-919a-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:30:41.676: INFO: Pod pod-configmaps-f8b88a34-919a-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:30:41.676: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-w57m9" for this suite. 
+Jun 18 07:30:54.641: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:30:55.623: INFO: namespace: e2e-tests-configmap-w57m9, resource: bindings, ignored listing per whitelist +Jun 18 07:30:56.516: INFO: namespace e2e-tests-configmap-w57m9 deletion completed in 13.96830656s + +• [SLOW TEST:17.998 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Watchers + should be able to restart watching from the last resource version observed by the previous watch [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:30:56.517: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename watch +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-watch-5v92v +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to restart watching from the last resource version observed by the previous watch [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: 
creating a watch on configmaps +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: closing the watch once it receives two notifications +Jun 18 07:30:56.719: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-5v92v,SelfLink:/api/v1/namespaces/e2e-tests-watch-5v92v/configmaps/e2e-watch-test-watch-closed,UID:02f4f321-919b-11e9-8cfd-00163e000a67,ResourceVersion:13529625,Generation:0,CreationTimestamp:2019-06-18 07:30:56 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +Jun 18 07:30:56.719: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-5v92v,SelfLink:/api/v1/namespaces/e2e-tests-watch-5v92v/configmaps/e2e-watch-test-watch-closed,UID:02f4f321-919b-11e9-8cfd-00163e000a67,ResourceVersion:13529626,Generation:0,CreationTimestamp:2019-06-18 07:30:56 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},} +STEP: modifying the configmap a second time, while the watch is closed +STEP: creating a new watch on configmaps from the last resource version observed by the first watch +STEP: deleting the configmap +STEP: Expecting to observe notifications for all changes to the configmap since the first watch closed +Jun 18 07:30:56.729: INFO: Got : MODIFIED 
&ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-5v92v,SelfLink:/api/v1/namespaces/e2e-tests-watch-5v92v/configmaps/e2e-watch-test-watch-closed,UID:02f4f321-919b-11e9-8cfd-00163e000a67,ResourceVersion:13529627,Generation:0,CreationTimestamp:2019-06-18 07:30:56 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +Jun 18 07:30:56.729: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-5v92v,SelfLink:/api/v1/namespaces/e2e-tests-watch-5v92v/configmaps/e2e-watch-test-watch-closed,UID:02f4f321-919b-11e9-8cfd-00163e000a67,ResourceVersion:13529628,Generation:0,CreationTimestamp:2019-06-18 07:30:56 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +[AfterEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:30:56.730: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-watch-5v92v" for this suite. 
+Jun 18 07:31:06.745: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:31:07.621: INFO: namespace: e2e-tests-watch-5v92v, resource: bindings, ignored listing per whitelist +Jun 18 07:31:07.765: INFO: namespace e2e-tests-watch-5v92v deletion completed in 11.033029739s + +• [SLOW TEST:11.249 seconds] +[sig-api-machinery] Watchers +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should be able to restart watching from the last resource version observed by the previous watch [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSS +------------------------------ +[sig-network] Service endpoints latency + should not be very high [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Service endpoints latency + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:31:07.766: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename svc-latency +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-svc-latency-966hk +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not be very high [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating replication controller svc-latency-rc in namespace e2e-tests-svc-latency-966hk +I0618 
07:31:09.602524 16 runners.go:184] Created replication controller with name: svc-latency-rc, namespace: e2e-tests-svc-latency-966hk, replica count: 1 +I0618 07:31:10.652886 16 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0618 07:31:11.653101 16 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0618 07:31:12.653341 16 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0618 07:31:13.653514 16 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0618 07:31:14.653696 16 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0618 07:31:15.653952 16 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0618 07:31:16.654147 16 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0618 07:31:17.654434 16 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Jun 18 07:31:18.535: INFO: Created: latency-svc-7kf8s +Jun 18 07:31:19.521: INFO: Got endpoints: latency-svc-7kf8s [1.766925273s] +Jun 18 07:31:19.606: INFO: Created: latency-svc-fbzl5 +Jun 18 07:31:19.650: INFO: Got endpoints: latency-svc-fbzl5 [128.766439ms] +Jun 18 07:31:19.658: INFO: Created: latency-svc-gsc7r +Jun 18 07:31:20.508: INFO: Got endpoints: latency-svc-gsc7r [986.35005ms] +Jun 18 07:31:20.538: INFO: Created: latency-svc-2jxvg 
+Jun 18 07:31:20.551: INFO: Got endpoints: latency-svc-2jxvg [1.02968306s] +Jun 18 07:31:20.562: INFO: Created: latency-svc-k8shx +Jun 18 07:31:20.573: INFO: Got endpoints: latency-svc-k8shx [1.051717137s] +Jun 18 07:31:20.586: INFO: Created: latency-svc-57s7j +Jun 18 07:31:20.588: INFO: Got endpoints: latency-svc-57s7j [1.066183557s] +Jun 18 07:31:20.597: INFO: Created: latency-svc-f45cn +Jun 18 07:31:20.604: INFO: Got endpoints: latency-svc-f45cn [1.082390579s] +Jun 18 07:31:20.610: INFO: Created: latency-svc-9fxbl +Jun 18 07:31:20.614: INFO: Got endpoints: latency-svc-9fxbl [1.092709321s] +Jun 18 07:31:20.641: INFO: Created: latency-svc-x7b9k +Jun 18 07:31:20.649: INFO: Got endpoints: latency-svc-x7b9k [1.127674914s] +Jun 18 07:31:20.661: INFO: Created: latency-svc-ffbtw +Jun 18 07:31:20.675: INFO: Got endpoints: latency-svc-ffbtw [1.153692372s] +Jun 18 07:31:21.529: INFO: Created: latency-svc-mmc6n +Jun 18 07:31:21.542: INFO: Got endpoints: latency-svc-mmc6n [2.020227865s] +Jun 18 07:31:22.535: INFO: Created: latency-svc-gf9wn +Jun 18 07:31:22.622: INFO: Got endpoints: latency-svc-gf9wn [3.100296812s] +Jun 18 07:31:23.513: INFO: Created: latency-svc-j5p8x +Jun 18 07:31:23.531: INFO: Got endpoints: latency-svc-j5p8x [4.008864683s] +Jun 18 07:31:23.624: INFO: Created: latency-svc-2jscx +Jun 18 07:31:23.624: INFO: Created: latency-svc-qzp9j +Jun 18 07:31:23.656: INFO: Got endpoints: latency-svc-qzp9j [4.134711075s] +Jun 18 07:31:23.657: INFO: Got endpoints: latency-svc-2jscx [4.134993283s] +Jun 18 07:31:23.669: INFO: Created: latency-svc-4zl6d +Jun 18 07:31:24.533: INFO: Got endpoints: latency-svc-4zl6d [5.011509862s] +Jun 18 07:31:24.544: INFO: Created: latency-svc-mnx7f +Jun 18 07:31:24.556: INFO: Got endpoints: latency-svc-mnx7f [4.905295873s] +Jun 18 07:31:24.580: INFO: Created: latency-svc-qlmxb +Jun 18 07:31:24.589: INFO: Got endpoints: latency-svc-qlmxb [4.081183163s] +Jun 18 07:31:24.597: INFO: Created: latency-svc-vv866 +Jun 18 07:31:24.599: INFO: Got 
endpoints: latency-svc-vv866 [4.047189981s] +Jun 18 07:31:24.604: INFO: Created: latency-svc-6qn4k +Jun 18 07:31:24.613: INFO: Got endpoints: latency-svc-6qn4k [4.039633711s] +Jun 18 07:31:24.653: INFO: Created: latency-svc-q5rx5 +Jun 18 07:31:24.684: INFO: Created: latency-svc-xf4jm +Jun 18 07:31:24.684: INFO: Got endpoints: latency-svc-q5rx5 [4.096530175s] +Jun 18 07:31:24.691: INFO: Got endpoints: latency-svc-xf4jm [4.086662073s] +Jun 18 07:31:25.569: INFO: Created: latency-svc-69m7l +Jun 18 07:31:25.594: INFO: Got endpoints: latency-svc-69m7l [4.979444675s] +Jun 18 07:31:25.599: INFO: Created: latency-svc-mvp6h +Jun 18 07:31:25.628: INFO: Got endpoints: latency-svc-mvp6h [4.978805167s] +Jun 18 07:31:26.516: INFO: Created: latency-svc-7b7gj +Jun 18 07:31:26.549: INFO: Got endpoints: latency-svc-7b7gj [5.873179417s] +Jun 18 07:31:26.566: INFO: Created: latency-svc-z7879 +Jun 18 07:31:26.575: INFO: Got endpoints: latency-svc-z7879 [5.033381919s] +Jun 18 07:31:26.603: INFO: Created: latency-svc-8hgnx +Jun 18 07:31:26.609: INFO: Got endpoints: latency-svc-8hgnx [3.986480336s] +Jun 18 07:31:26.648: INFO: Created: latency-svc-29s47 +Jun 18 07:31:26.671: INFO: Got endpoints: latency-svc-29s47 [3.14062479s] +Jun 18 07:31:27.515: INFO: Created: latency-svc-bvb56 +Jun 18 07:31:27.581: INFO: Got endpoints: latency-svc-bvb56 [3.924384845s] +Jun 18 07:31:27.601: INFO: Created: latency-svc-5l5wk +Jun 18 07:31:27.607: INFO: Got endpoints: latency-svc-5l5wk [3.950664205s] +Jun 18 07:31:27.622: INFO: Created: latency-svc-5x94s +Jun 18 07:31:27.629: INFO: Got endpoints: latency-svc-5x94s [3.095760122s] +Jun 18 07:31:27.634: INFO: Created: latency-svc-w5mjq +Jun 18 07:31:27.647: INFO: Got endpoints: latency-svc-w5mjq [3.090978429s] +Jun 18 07:31:27.656: INFO: Created: latency-svc-5m9lb +Jun 18 07:31:27.665: INFO: Got endpoints: latency-svc-5m9lb [3.076390104s] +Jun 18 07:31:28.533: INFO: Created: latency-svc-hkcld +Jun 18 07:31:28.534: INFO: Got endpoints: latency-svc-hkcld 
[3.935309863s] +Jun 18 07:31:28.551: INFO: Created: latency-svc-zjk6f +Jun 18 07:31:28.564: INFO: Got endpoints: latency-svc-zjk6f [3.950974693s] +Jun 18 07:31:28.571: INFO: Created: latency-svc-7gdbd +Jun 18 07:31:28.576: INFO: Got endpoints: latency-svc-7gdbd [3.891843046s] +Jun 18 07:31:28.581: INFO: Created: latency-svc-br5jd +Jun 18 07:31:28.592: INFO: Got endpoints: latency-svc-br5jd [3.900888219s] +Jun 18 07:31:28.592: INFO: Created: latency-svc-ll5pt +Jun 18 07:31:28.602: INFO: Got endpoints: latency-svc-ll5pt [3.008100016s] +Jun 18 07:31:28.617: INFO: Created: latency-svc-nqx95 +Jun 18 07:31:28.631: INFO: Got endpoints: latency-svc-nqx95 [3.002927986s] +Jun 18 07:31:28.632: INFO: Created: latency-svc-5pzlr +Jun 18 07:31:28.648: INFO: Got endpoints: latency-svc-5pzlr [2.099096416s] +Jun 18 07:31:28.655: INFO: Created: latency-svc-w7clj +Jun 18 07:31:30.526: INFO: Got endpoints: latency-svc-w7clj [3.950365115s] +Jun 18 07:31:30.540: INFO: Created: latency-svc-zzls4 +Jun 18 07:31:30.621: INFO: Created: latency-svc-75ctn +Jun 18 07:31:30.639: INFO: Got endpoints: latency-svc-zzls4 [4.030591364s] +Jun 18 07:31:30.655: INFO: Got endpoints: latency-svc-75ctn [3.983725332s] +Jun 18 07:31:30.659: INFO: Created: latency-svc-vct7c +Jun 18 07:31:30.659: INFO: Got endpoints: latency-svc-vct7c [3.078116783s] +Jun 18 07:31:31.532: INFO: Created: latency-svc-zwctz +Jun 18 07:31:31.547: INFO: Got endpoints: latency-svc-zwctz [3.939915109s] +Jun 18 07:31:31.570: INFO: Created: latency-svc-khlh2 +Jun 18 07:31:31.586: INFO: Got endpoints: latency-svc-khlh2 [3.957140422s] +Jun 18 07:31:31.591: INFO: Created: latency-svc-qjz25 +Jun 18 07:31:31.595: INFO: Got endpoints: latency-svc-qjz25 [3.947937922s] +Jun 18 07:31:31.610: INFO: Created: latency-svc-s5pz8 +Jun 18 07:31:31.616: INFO: Got endpoints: latency-svc-s5pz8 [3.950406259s] +Jun 18 07:31:31.624: INFO: Created: latency-svc-pxb4f +Jun 18 07:31:31.637: INFO: Got endpoints: latency-svc-pxb4f [3.102665834s] +Jun 18 
07:31:31.657: INFO: Created: latency-svc-kvmqj +Jun 18 07:31:31.664: INFO: Got endpoints: latency-svc-kvmqj [3.100085219s] +Jun 18 07:31:31.671: INFO: Created: latency-svc-2fzrv +Jun 18 07:31:31.674: INFO: Got endpoints: latency-svc-2fzrv [3.097709565s] +Jun 18 07:31:31.678: INFO: Created: latency-svc-mbn5d +Jun 18 07:31:32.514: INFO: Got endpoints: latency-svc-mbn5d [3.921872746s] +Jun 18 07:31:32.519: INFO: Created: latency-svc-qs22l +Jun 18 07:31:32.534: INFO: Got endpoints: latency-svc-qs22l [3.932478792s] +Jun 18 07:31:32.537: INFO: Created: latency-svc-9n5fv +Jun 18 07:31:32.544: INFO: Got endpoints: latency-svc-9n5fv [3.912385555s] +Jun 18 07:31:32.578: INFO: Created: latency-svc-cf96v +Jun 18 07:31:32.595: INFO: Got endpoints: latency-svc-cf96v [3.947360481s] +Jun 18 07:31:32.602: INFO: Created: latency-svc-wncxt +Jun 18 07:31:32.607: INFO: Got endpoints: latency-svc-wncxt [2.080905429s] +Jun 18 07:31:32.608: INFO: Created: latency-svc-2x6kh +Jun 18 07:31:32.620: INFO: Got endpoints: latency-svc-2x6kh [24.910298ms] +Jun 18 07:31:32.627: INFO: Created: latency-svc-wmqxg +Jun 18 07:31:32.639: INFO: Got endpoints: latency-svc-wmqxg [2.000234692s] +Jun 18 07:31:32.646: INFO: Created: latency-svc-gzxhm +Jun 18 07:31:32.652: INFO: Got endpoints: latency-svc-gzxhm [1.996572217s] +Jun 18 07:31:32.653: INFO: Created: latency-svc-p94zf +Jun 18 07:31:32.656: INFO: Got endpoints: latency-svc-p94zf [1.996403529s] +Jun 18 07:31:33.599: INFO: Created: latency-svc-6gp6p +Jun 18 07:31:33.615: INFO: Got endpoints: latency-svc-6gp6p [2.067708477s] +Jun 18 07:31:33.647: INFO: Created: latency-svc-qhx8t +Jun 18 07:31:33.655: INFO: Got endpoints: latency-svc-qhx8t [2.068443896s] +Jun 18 07:31:33.661: INFO: Created: latency-svc-7v5ff +Jun 18 07:31:33.670: INFO: Got endpoints: latency-svc-7v5ff [2.075542883s] +Jun 18 07:31:33.676: INFO: Created: latency-svc-vp4lw +Jun 18 07:31:33.676: INFO: Got endpoints: latency-svc-vp4lw [2.060529704s] +Jun 18 07:31:34.527: INFO: Created: 
latency-svc-cnptj +Jun 18 07:31:34.535: INFO: Got endpoints: latency-svc-cnptj [2.898566399s] +Jun 18 07:31:34.536: INFO: Created: latency-svc-2gdrp +Jun 18 07:31:34.544: INFO: Created: latency-svc-7nkhn +Jun 18 07:31:34.549: INFO: Got endpoints: latency-svc-2gdrp [2.884651093s] +Jun 18 07:31:34.556: INFO: Got endpoints: latency-svc-7nkhn [2.881678984s] +Jun 18 07:31:34.561: INFO: Created: latency-svc-hfj2m +Jun 18 07:31:34.566: INFO: Got endpoints: latency-svc-hfj2m [2.052740608s] +Jun 18 07:31:34.574: INFO: Created: latency-svc-xpfq7 +Jun 18 07:31:34.581: INFO: Got endpoints: latency-svc-xpfq7 [2.046165183s] +Jun 18 07:31:34.584: INFO: Created: latency-svc-9lhm9 +Jun 18 07:31:34.592: INFO: Got endpoints: latency-svc-9lhm9 [2.048020724s] +Jun 18 07:31:34.600: INFO: Created: latency-svc-dhqmc +Jun 18 07:31:34.606: INFO: Got endpoints: latency-svc-dhqmc [1.999624622s] +Jun 18 07:31:34.611: INFO: Created: latency-svc-kw2sn +Jun 18 07:31:34.614: INFO: Got endpoints: latency-svc-kw2sn [1.994389786s] +Jun 18 07:31:34.623: INFO: Created: latency-svc-9cmqb +Jun 18 07:31:34.624: INFO: Created: latency-svc-npqxc +Jun 18 07:31:34.627: INFO: Got endpoints: latency-svc-npqxc [1.975734592s] +Jun 18 07:31:34.628: INFO: Got endpoints: latency-svc-9cmqb [1.988165441s] +Jun 18 07:31:34.632: INFO: Created: latency-svc-j8cwb +Jun 18 07:31:34.637: INFO: Got endpoints: latency-svc-j8cwb [1.981309034s] +Jun 18 07:31:34.642: INFO: Created: latency-svc-zqghn +Jun 18 07:31:34.646: INFO: Got endpoints: latency-svc-zqghn [1.031001367s] +Jun 18 07:31:34.674: INFO: Created: latency-svc-dgrjl +Jun 18 07:31:34.682: INFO: Got endpoints: latency-svc-dgrjl [1.027337441s] +Jun 18 07:31:35.551: INFO: Created: latency-svc-lh4qz +Jun 18 07:31:35.563: INFO: Got endpoints: latency-svc-lh4qz [1.893198084s] +Jun 18 07:31:35.598: INFO: Created: latency-svc-xjxzp +Jun 18 07:31:35.623: INFO: Created: latency-svc-mmfx2 +Jun 18 07:31:36.510: INFO: Got endpoints: latency-svc-xjxzp [2.833985377s] +Jun 18 
07:31:36.525: INFO: Got endpoints: latency-svc-mmfx2 [1.989452776s] +Jun 18 07:31:36.578: INFO: Created: latency-svc-8s2w4 +Jun 18 07:31:37.528: INFO: Got endpoints: latency-svc-8s2w4 [2.979485017s] +Jun 18 07:31:37.533: INFO: Created: latency-svc-cln8m +Jun 18 07:31:37.564: INFO: Got endpoints: latency-svc-cln8m [3.007890302s] +Jun 18 07:31:37.618: INFO: Created: latency-svc-9zk5t +Jun 18 07:31:37.625: INFO: Got endpoints: latency-svc-9zk5t [3.058493597s] +Jun 18 07:31:38.568: INFO: Created: latency-svc-vcdpp +Jun 18 07:31:38.575: INFO: Got endpoints: latency-svc-vcdpp [3.993848746s] +Jun 18 07:31:39.528: INFO: Created: latency-svc-nkv2b +Jun 18 07:31:39.528: INFO: Created: latency-svc-zltgf +Jun 18 07:31:39.573: INFO: Got endpoints: latency-svc-nkv2b [4.966736605s] +Jun 18 07:31:39.573: INFO: Got endpoints: latency-svc-zltgf [4.981748665s] +Jun 18 07:31:39.574: INFO: Created: latency-svc-2cfnw +Jun 18 07:31:39.596: INFO: Got endpoints: latency-svc-2cfnw [4.981876635s] +Jun 18 07:31:39.621: INFO: Created: latency-svc-dqfw4 +Jun 18 07:31:39.634: INFO: Got endpoints: latency-svc-dqfw4 [5.007011068s] +Jun 18 07:31:39.641: INFO: Created: latency-svc-qfzt7 +Jun 18 07:31:39.641: INFO: Got endpoints: latency-svc-qfzt7 [5.013635471s] +Jun 18 07:31:39.658: INFO: Created: latency-svc-4h2q5 +Jun 18 07:31:40.525: INFO: Got endpoints: latency-svc-4h2q5 [5.887678625s] +Jun 18 07:31:40.535: INFO: Created: latency-svc-xx8vq +Jun 18 07:31:40.548: INFO: Got endpoints: latency-svc-xx8vq [5.902037445s] +Jun 18 07:31:40.549: INFO: Created: latency-svc-4nzh7 +Jun 18 07:31:40.561: INFO: Got endpoints: latency-svc-4nzh7 [5.879156841s] +Jun 18 07:31:40.562: INFO: Created: latency-svc-dgrvr +Jun 18 07:31:40.576: INFO: Got endpoints: latency-svc-dgrvr [5.012900269s] +Jun 18 07:31:40.596: INFO: Created: latency-svc-tnmts +Jun 18 07:31:40.606: INFO: Got endpoints: latency-svc-tnmts [4.09513071s] +Jun 18 07:31:40.610: INFO: Created: latency-svc-bkdtt +Jun 18 07:31:40.625: INFO: Got endpoints: 
latency-svc-bkdtt [4.100085182s] +Jun 18 07:31:40.631: INFO: Created: latency-svc-zjvmd +Jun 18 07:31:40.637: INFO: Got endpoints: latency-svc-zjvmd [3.108528321s] +Jun 18 07:31:40.643: INFO: Created: latency-svc-5pnll +Jun 18 07:31:40.657: INFO: Got endpoints: latency-svc-5pnll [3.093273731s] +Jun 18 07:31:40.662: INFO: Created: latency-svc-6qw4t +Jun 18 07:31:40.667: INFO: Got endpoints: latency-svc-6qw4t [3.041919145s] +Jun 18 07:31:41.518: INFO: Created: latency-svc-hjlfw +Jun 18 07:31:41.549: INFO: Got endpoints: latency-svc-hjlfw [2.9743007s] +Jun 18 07:31:41.560: INFO: Created: latency-svc-6cph7 +Jun 18 07:31:41.569: INFO: Got endpoints: latency-svc-6cph7 [1.996049277s] +Jun 18 07:31:41.583: INFO: Created: latency-svc-bnfgw +Jun 18 07:31:41.595: INFO: Got endpoints: latency-svc-bnfgw [2.021126775s] +Jun 18 07:31:41.614: INFO: Created: latency-svc-69v2r +Jun 18 07:31:41.618: INFO: Got endpoints: latency-svc-69v2r [2.021451129s] +Jun 18 07:31:41.624: INFO: Created: latency-svc-ztg6g +Jun 18 07:31:41.633: INFO: Created: latency-svc-d997f +Jun 18 07:31:41.634: INFO: Got endpoints: latency-svc-ztg6g [1.999468363s] +Jun 18 07:31:41.644: INFO: Got endpoints: latency-svc-d997f [2.002898672s] +Jun 18 07:31:42.564: INFO: Created: latency-svc-nr6dz +Jun 18 07:31:42.575: INFO: Got endpoints: latency-svc-nr6dz [2.050360307s] +Jun 18 07:31:42.595: INFO: Created: latency-svc-sqct9 +Jun 18 07:31:42.622: INFO: Got endpoints: latency-svc-sqct9 [2.073925264s] +Jun 18 07:31:42.648: INFO: Created: latency-svc-88l29 +Jun 18 07:31:43.537: INFO: Got endpoints: latency-svc-88l29 [2.97582135s] +Jun 18 07:31:43.563: INFO: Created: latency-svc-tn9cq +Jun 18 07:31:43.570: INFO: Got endpoints: latency-svc-tn9cq [2.993496958s] +Jun 18 07:31:43.590: INFO: Created: latency-svc-xfqj5 +Jun 18 07:31:43.603: INFO: Got endpoints: latency-svc-xfqj5 [2.997866188s] +Jun 18 07:31:43.608: INFO: Created: latency-svc-bw5n4 +Jun 18 07:31:43.616: INFO: Got endpoints: latency-svc-bw5n4 [2.991121606s] +Jun 
18 07:31:43.634: INFO: Created: latency-svc-rclf6 +Jun 18 07:31:43.648: INFO: Got endpoints: latency-svc-rclf6 [3.011239429s] +Jun 18 07:31:43.658: INFO: Created: latency-svc-4dctb +Jun 18 07:31:44.510: INFO: Got endpoints: latency-svc-4dctb [3.852756972s] +Jun 18 07:31:44.538: INFO: Created: latency-svc-x8sbb +Jun 18 07:31:44.546: INFO: Got endpoints: latency-svc-x8sbb [3.878816429s] +Jun 18 07:31:44.565: INFO: Created: latency-svc-zjxwj +Jun 18 07:31:44.570: INFO: Got endpoints: latency-svc-zjxwj [3.021076163s] +Jun 18 07:31:44.576: INFO: Created: latency-svc-6s7j6 +Jun 18 07:31:44.589: INFO: Got endpoints: latency-svc-6s7j6 [3.019590785s] +Jun 18 07:31:44.603: INFO: Created: latency-svc-gbzvz +Jun 18 07:31:44.614: INFO: Created: latency-svc-fh2n9 +Jun 18 07:31:44.615: INFO: Got endpoints: latency-svc-gbzvz [3.020534729s] +Jun 18 07:31:44.617: INFO: Got endpoints: latency-svc-fh2n9 [2.999363806s] +Jun 18 07:31:44.625: INFO: Created: latency-svc-bls8q +Jun 18 07:31:44.630: INFO: Got endpoints: latency-svc-bls8q [2.99606058s] +Jun 18 07:31:44.639: INFO: Created: latency-svc-xkdw4 +Jun 18 07:31:45.537: INFO: Got endpoints: latency-svc-xkdw4 [3.893087181s] +Jun 18 07:31:46.576: INFO: Created: latency-svc-f27bq +Jun 18 07:31:46.591: INFO: Got endpoints: latency-svc-f27bq [4.015761498s] +Jun 18 07:31:46.591: INFO: Created: latency-svc-7v6bj +Jun 18 07:31:46.601: INFO: Got endpoints: latency-svc-7v6bj [3.978716362s] +Jun 18 07:31:46.627: INFO: Created: latency-svc-cdwrc +Jun 18 07:31:46.633: INFO: Got endpoints: latency-svc-cdwrc [3.096280998s] +Jun 18 07:31:46.651: INFO: Created: latency-svc-d4rgv +Jun 18 07:31:46.657: INFO: Created: latency-svc-jk8fl +Jun 18 07:31:46.664: INFO: Got endpoints: latency-svc-d4rgv [3.093645106s] +Jun 18 07:31:46.669: INFO: Got endpoints: latency-svc-jk8fl [3.065505977s] +Jun 18 07:31:46.673: INFO: Created: latency-svc-r4njh +Jun 18 07:31:46.677: INFO: Got endpoints: latency-svc-r4njh [3.061113416s] +Jun 18 07:31:46.679: INFO: Created: 
latency-svc-wt25q +Jun 18 07:31:46.682: INFO: Got endpoints: latency-svc-wt25q [3.03360211s] +Jun 18 07:31:47.555: INFO: Created: latency-svc-2zpvj +Jun 18 07:31:47.570: INFO: Got endpoints: latency-svc-2zpvj [3.059828432s] +Jun 18 07:31:47.575: INFO: Created: latency-svc-98srs +Jun 18 07:31:47.584: INFO: Got endpoints: latency-svc-98srs [3.038403947s] +Jun 18 07:31:47.596: INFO: Created: latency-svc-flpcg +Jun 18 07:31:47.598: INFO: Got endpoints: latency-svc-flpcg [3.027854976s] +Jun 18 07:31:47.606: INFO: Created: latency-svc-fpd8c +Jun 18 07:31:47.612: INFO: Got endpoints: latency-svc-fpd8c [3.023466327s] +Jun 18 07:31:47.629: INFO: Created: latency-svc-v4lqk +Jun 18 07:31:47.643: INFO: Got endpoints: latency-svc-v4lqk [3.027884178s] +Jun 18 07:31:47.649: INFO: Created: latency-svc-5mv6d +Jun 18 07:31:48.536: INFO: Got endpoints: latency-svc-5mv6d [3.918822265s] +Jun 18 07:31:48.544: INFO: Created: latency-svc-nngn7 +Jun 18 07:31:48.546: INFO: Got endpoints: latency-svc-nngn7 [3.915597017s] +Jun 18 07:31:48.612: INFO: Created: latency-svc-25r5t +Jun 18 07:31:48.630: INFO: Got endpoints: latency-svc-25r5t [3.092965873s] +Jun 18 07:31:50.570: INFO: Created: latency-svc-r5pdz +Jun 18 07:31:50.620: INFO: Got endpoints: latency-svc-r5pdz [4.028952184s] +Jun 18 07:31:50.621: INFO: Created: latency-svc-kb9jz +Jun 18 07:31:50.650: INFO: Got endpoints: latency-svc-kb9jz [4.049217523s] +Jun 18 07:31:50.679: INFO: Created: latency-svc-w7wcm +Jun 18 07:31:51.539: INFO: Created: latency-svc-xl6wj +Jun 18 07:31:51.580: INFO: Got endpoints: latency-svc-w7wcm [4.946342202s] +Jun 18 07:31:51.584: INFO: Got endpoints: latency-svc-xl6wj [4.920790765s] +Jun 18 07:31:51.589: INFO: Created: latency-svc-mvcjp +Jun 18 07:31:51.594: INFO: Got endpoints: latency-svc-mvcjp [4.924601095s] +Jun 18 07:31:51.600: INFO: Created: latency-svc-6z7nl +Jun 18 07:31:51.610: INFO: Got endpoints: latency-svc-6z7nl [4.933135172s] +Jun 18 07:31:51.651: INFO: Created: latency-svc-cxcvd +Jun 18 
07:31:51.662: INFO: Got endpoints: latency-svc-cxcvd [4.98015354s] +Jun 18 07:31:51.662: INFO: Created: latency-svc-nhghv +Jun 18 07:31:51.673: INFO: Got endpoints: latency-svc-nhghv [4.103185225s] +Jun 18 07:31:52.548: INFO: Created: latency-svc-bdvjf +Jun 18 07:31:53.537: INFO: Got endpoints: latency-svc-bdvjf [5.952982234s] +Jun 18 07:31:54.542: INFO: Created: latency-svc-hksfx +Jun 18 07:31:54.553: INFO: Got endpoints: latency-svc-hksfx [6.954967417s] +Jun 18 07:31:54.555: INFO: Created: latency-svc-vmx4c +Jun 18 07:31:54.559: INFO: Got endpoints: latency-svc-vmx4c [6.94654257s] +Jun 18 07:31:54.572: INFO: Created: latency-svc-n5rjz +Jun 18 07:31:54.583: INFO: Got endpoints: latency-svc-n5rjz [6.939945677s] +Jun 18 07:31:54.594: INFO: Created: latency-svc-jl59q +Jun 18 07:31:54.602: INFO: Got endpoints: latency-svc-jl59q [6.066061312s] +Jun 18 07:31:54.628: INFO: Created: latency-svc-8g4vz +Jun 18 07:31:54.641: INFO: Got endpoints: latency-svc-8g4vz [6.095345824s] +Jun 18 07:31:54.652: INFO: Created: latency-svc-nwp4m +Jun 18 07:31:54.661: INFO: Got endpoints: latency-svc-nwp4m [6.030491784s] +Jun 18 07:31:55.509: INFO: Created: latency-svc-lkjzb +Jun 18 07:31:55.532: INFO: Got endpoints: latency-svc-lkjzb [4.911593376s] +Jun 18 07:31:55.540: INFO: Created: latency-svc-7n4ks +Jun 18 07:31:55.548: INFO: Got endpoints: latency-svc-7n4ks [4.897925246s] +Jun 18 07:31:55.571: INFO: Created: latency-svc-6rlv2 +Jun 18 07:31:55.575: INFO: Got endpoints: latency-svc-6rlv2 [3.995110705s] +Jun 18 07:31:55.575: INFO: Created: latency-svc-qqrnx +Jun 18 07:31:55.582: INFO: Got endpoints: latency-svc-qqrnx [3.997582151s] +Jun 18 07:31:55.593: INFO: Created: latency-svc-mxgvk +Jun 18 07:31:55.606: INFO: Created: latency-svc-v9tqt +Jun 18 07:31:55.606: INFO: Got endpoints: latency-svc-mxgvk [4.012743436s] +Jun 18 07:31:55.613: INFO: Got endpoints: latency-svc-v9tqt [4.002645428s] +Jun 18 07:31:55.631: INFO: Created: latency-svc-jsfcv +Jun 18 07:31:55.640: INFO: Got endpoints: 
latency-svc-jsfcv [3.97820167s] +Jun 18 07:31:55.658: INFO: Created: latency-svc-4pbtc +Jun 18 07:31:55.667: INFO: Got endpoints: latency-svc-4pbtc [3.993288873s] +Jun 18 07:31:56.514: INFO: Created: latency-svc-bnlh9 +Jun 18 07:31:56.521: INFO: Got endpoints: latency-svc-bnlh9 [2.983866277s] +Jun 18 07:31:56.526: INFO: Created: latency-svc-5wzfb +Jun 18 07:31:56.536: INFO: Created: latency-svc-cmntm +Jun 18 07:31:56.537: INFO: Got endpoints: latency-svc-5wzfb [1.983786799s] +Jun 18 07:31:56.557: INFO: Got endpoints: latency-svc-cmntm [1.998379846s] +Jun 18 07:31:56.573: INFO: Created: latency-svc-x4mtv +Jun 18 07:31:56.574: INFO: Got endpoints: latency-svc-x4mtv [1.98997108s] +Jun 18 07:31:56.582: INFO: Created: latency-svc-9n4rd +Jun 18 07:31:56.587: INFO: Got endpoints: latency-svc-9n4rd [1.984922981s] +Jun 18 07:31:56.588: INFO: Created: latency-svc-fbg45 +Jun 18 07:31:56.592: INFO: Got endpoints: latency-svc-fbg45 [1.951026993s] +Jun 18 07:31:56.601: INFO: Created: latency-svc-fss8j +Jun 18 07:31:56.613: INFO: Got endpoints: latency-svc-fss8j [1.951552837s] +Jun 18 07:31:56.616: INFO: Created: latency-svc-2k7dw +Jun 18 07:31:56.625: INFO: Got endpoints: latency-svc-2k7dw [1.093087602s] +Jun 18 07:31:56.627: INFO: Created: latency-svc-6tc94 +Jun 18 07:31:56.633: INFO: Got endpoints: latency-svc-6tc94 [1.085383833s] +Jun 18 07:31:56.641: INFO: Created: latency-svc-mx4z5 +Jun 18 07:31:56.644: INFO: Got endpoints: latency-svc-mx4z5 [1.069194998s] +Jun 18 07:31:56.652: INFO: Created: latency-svc-gb6ld +Jun 18 07:31:56.655: INFO: Got endpoints: latency-svc-gb6ld [1.072672268s] +Jun 18 07:31:56.655: INFO: Created: latency-svc-wrvvp +Jun 18 07:31:56.668: INFO: Got endpoints: latency-svc-wrvvp [1.061541207s] +Jun 18 07:31:56.668: INFO: Created: latency-svc-grjdd +Jun 18 07:31:57.537: INFO: Created: latency-svc-nczgn +Jun 18 07:31:57.542: INFO: Got endpoints: latency-svc-grjdd [1.929235777s] +Jun 18 07:31:57.548: INFO: Got endpoints: latency-svc-nczgn [1.907818798s] 
+Jun 18 07:31:57.574: INFO: Created: latency-svc-z22kv +Jun 18 07:31:57.595: INFO: Got endpoints: latency-svc-z22kv [1.928587949s] +Jun 18 07:31:57.598: INFO: Created: latency-svc-jmqd9 +Jun 18 07:31:57.611: INFO: Got endpoints: latency-svc-jmqd9 [1.090076652s] +Jun 18 07:31:57.619: INFO: Created: latency-svc-68sr8 +Jun 18 07:31:57.628: INFO: Got endpoints: latency-svc-68sr8 [1.090749085s] +Jun 18 07:31:57.629: INFO: Created: latency-svc-s5vv2 +Jun 18 07:31:57.634: INFO: Created: latency-svc-kg4kp +Jun 18 07:31:57.637: INFO: Got endpoints: latency-svc-s5vv2 [1.079063784s] +Jun 18 07:31:57.643: INFO: Got endpoints: latency-svc-kg4kp [1.069912787s] +Jun 18 07:31:57.644: INFO: Created: latency-svc-7tqng +Jun 18 07:31:57.649: INFO: Got endpoints: latency-svc-7tqng [1.061902674s] +Jun 18 07:31:57.650: INFO: Created: latency-svc-9q9xp +Jun 18 07:31:57.654: INFO: Got endpoints: latency-svc-9q9xp [1.062207986s] +Jun 18 07:31:57.656: INFO: Created: latency-svc-f8qlz +Jun 18 07:31:57.661: INFO: Got endpoints: latency-svc-f8qlz [1.048230563s] +Jun 18 07:31:57.667: INFO: Created: latency-svc-9lwqz +Jun 18 07:31:57.670: INFO: Got endpoints: latency-svc-9lwqz [1.045506447s] +Jun 18 07:31:57.674: INFO: Created: latency-svc-2bk4q +Jun 18 07:31:57.677: INFO: Got endpoints: latency-svc-2bk4q [1.043493929s] +Jun 18 07:31:57.677: INFO: Created: latency-svc-fvp77 +Jun 18 07:31:58.514: INFO: Got endpoints: latency-svc-fvp77 [1.869426932s] +Jun 18 07:31:58.534: INFO: Created: latency-svc-62x7x +Jun 18 07:31:58.541: INFO: Got endpoints: latency-svc-62x7x [1.886333716s] +Jun 18 07:31:58.541: INFO: Created: latency-svc-dkm68 +Jun 18 07:31:58.553: INFO: Got endpoints: latency-svc-dkm68 [1.884588309s] +Jun 18 07:31:58.562: INFO: Created: latency-svc-4pcnq +Jun 18 07:31:58.568: INFO: Created: latency-svc-wb87p +Jun 18 07:31:58.571: INFO: Got endpoints: latency-svc-4pcnq [1.028437001s] +Jun 18 07:31:58.576: INFO: Got endpoints: latency-svc-wb87p [1.027945554s] +Jun 18 07:31:58.586: INFO: 
Created: latency-svc-jdzsf +Jun 18 07:31:58.587: INFO: Got endpoints: latency-svc-jdzsf [991.780521ms] +Jun 18 07:31:58.590: INFO: Created: latency-svc-4n48v +Jun 18 07:31:58.597: INFO: Got endpoints: latency-svc-4n48v [985.33257ms] +Jun 18 07:31:58.601: INFO: Created: latency-svc-dcsqf +Jun 18 07:31:58.601: INFO: Got endpoints: latency-svc-dcsqf [973.062979ms] +Jun 18 07:31:58.606: INFO: Created: latency-svc-szrdg +Jun 18 07:31:58.610: INFO: Got endpoints: latency-svc-szrdg [973.179519ms] +Jun 18 07:31:58.616: INFO: Created: latency-svc-r8l9q +Jun 18 07:31:58.627: INFO: Got endpoints: latency-svc-r8l9q [983.768671ms] +Jun 18 07:31:58.630: INFO: Created: latency-svc-sps9c +Jun 18 07:31:58.638: INFO: Got endpoints: latency-svc-sps9c [988.638877ms] +Jun 18 07:31:58.640: INFO: Created: latency-svc-fk7ft +Jun 18 07:31:58.642: INFO: Got endpoints: latency-svc-fk7ft [987.974454ms] +Jun 18 07:31:58.659: INFO: Created: latency-svc-78tsd +Jun 18 07:31:58.665: INFO: Got endpoints: latency-svc-78tsd [1.003687661s] +Jun 18 07:31:58.666: INFO: Created: latency-svc-d9f2r +Jun 18 07:31:58.671: INFO: Got endpoints: latency-svc-d9f2r [1.000627464s] +Jun 18 07:31:58.676: INFO: Created: latency-svc-hvqlg +Jun 18 07:31:58.679: INFO: Got endpoints: latency-svc-hvqlg [1.002551752s] +Jun 18 07:31:58.681: INFO: Created: latency-svc-fgnjj +Jun 18 07:31:58.685: INFO: Created: latency-svc-2m6v4 +Jun 18 07:31:58.686: INFO: Got endpoints: latency-svc-fgnjj [172.484248ms] +Jun 18 07:31:59.518: INFO: Got endpoints: latency-svc-2m6v4 [977.095302ms] +Jun 18 07:31:59.535: INFO: Created: latency-svc-7db7b +Jun 18 07:31:59.556: INFO: Got endpoints: latency-svc-7db7b [1.003586669s] +Jun 18 07:31:59.559: INFO: Created: latency-svc-xpdc6 +Jun 18 07:31:59.566: INFO: Got endpoints: latency-svc-xpdc6 [994.64097ms] +Jun 18 07:31:59.573: INFO: Created: latency-svc-vs44c +Jun 18 07:31:59.575: INFO: Got endpoints: latency-svc-vs44c [999.141004ms] +Jun 18 07:31:59.575: INFO: Latencies: [24.910298ms 128.766439ms 
172.484248ms 973.062979ms 973.179519ms 977.095302ms 983.768671ms 985.33257ms 986.35005ms 987.974454ms 988.638877ms 991.780521ms 994.64097ms 999.141004ms 1.000627464s 1.002551752s 1.003586669s 1.003687661s 1.027337441s 1.027945554s 1.028437001s 1.02968306s 1.031001367s 1.043493929s 1.045506447s 1.048230563s 1.051717137s 1.061541207s 1.061902674s 1.062207986s 1.066183557s 1.069194998s 1.069912787s 1.072672268s 1.079063784s 1.082390579s 1.085383833s 1.090076652s 1.090749085s 1.092709321s 1.093087602s 1.127674914s 1.153692372s 1.869426932s 1.884588309s 1.886333716s 1.893198084s 1.907818798s 1.928587949s 1.929235777s 1.951026993s 1.951552837s 1.975734592s 1.981309034s 1.983786799s 1.984922981s 1.988165441s 1.989452776s 1.98997108s 1.994389786s 1.996049277s 1.996403529s 1.996572217s 1.998379846s 1.999468363s 1.999624622s 2.000234692s 2.002898672s 2.020227865s 2.021126775s 2.021451129s 2.046165183s 2.048020724s 2.050360307s 2.052740608s 2.060529704s 2.067708477s 2.068443896s 2.073925264s 2.075542883s 2.080905429s 2.099096416s 2.833985377s 2.881678984s 2.884651093s 2.898566399s 2.9743007s 2.97582135s 2.979485017s 2.983866277s 2.991121606s 2.993496958s 2.99606058s 2.997866188s 2.999363806s 3.002927986s 3.007890302s 3.008100016s 3.011239429s 3.019590785s 3.020534729s 3.021076163s 3.023466327s 3.027854976s 3.027884178s 3.03360211s 3.038403947s 3.041919145s 3.058493597s 3.059828432s 3.061113416s 3.065505977s 3.076390104s 3.078116783s 3.090978429s 3.092965873s 3.093273731s 3.093645106s 3.095760122s 3.096280998s 3.097709565s 3.100085219s 3.100296812s 3.102665834s 3.108528321s 3.14062479s 3.852756972s 3.878816429s 3.891843046s 3.893087181s 3.900888219s 3.912385555s 3.915597017s 3.918822265s 3.921872746s 3.924384845s 3.932478792s 3.935309863s 3.939915109s 3.947360481s 3.947937922s 3.950365115s 3.950406259s 3.950664205s 3.950974693s 3.957140422s 3.97820167s 3.978716362s 3.983725332s 3.986480336s 3.993288873s 3.993848746s 3.995110705s 3.997582151s 4.002645428s 4.008864683s 
4.012743436s 4.015761498s 4.028952184s 4.030591364s 4.039633711s 4.047189981s 4.049217523s 4.081183163s 4.086662073s 4.09513071s 4.096530175s 4.100085182s 4.103185225s 4.134711075s 4.134993283s 4.897925246s 4.905295873s 4.911593376s 4.920790765s 4.924601095s 4.933135172s 4.946342202s 4.966736605s 4.978805167s 4.979444675s 4.98015354s 4.981748665s 4.981876635s 5.007011068s 5.011509862s 5.012900269s 5.013635471s 5.033381919s 5.873179417s 5.879156841s 5.887678625s 5.902037445s 5.952982234s 6.030491784s 6.066061312s 6.095345824s 6.939945677s 6.94654257s 6.954967417s] +Jun 18 07:31:59.575: INFO: 50 %ile: 3.020534729s +Jun 18 07:31:59.575: INFO: 90 %ile: 4.979444675s +Jun 18 07:31:59.575: INFO: 99 %ile: 6.94654257s +Jun 18 07:31:59.575: INFO: Total sample count: 200 +[AfterEach] [sig-network] Service endpoints latency + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:31:59.575: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-svc-latency-966hk" for this suite. 
+Jun 18 07:32:39.599: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:32:39.681: INFO: namespace: e2e-tests-svc-latency-966hk, resource: bindings, ignored listing per whitelist +Jun 18 07:32:40.515: INFO: namespace e2e-tests-svc-latency-966hk deletion completed in 40.932030663s + +• [SLOW TEST:92.750 seconds] +[sig-network] Service endpoints latency +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + should not be very high [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:32:40.516: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-k759k +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide podname only [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +Jun 18 07:32:41.595: INFO: Waiting up to 5m0s for pod "downwardapi-volume-416e8632-919b-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-k759k" to be "success or failure" +Jun 18 07:32:41.603: INFO: Pod "downwardapi-volume-416e8632-919b-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 7.947534ms +Jun 18 07:32:43.606: INFO: Pod "downwardapi-volume-416e8632-919b-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.010293273s +Jun 18 07:32:45.608: INFO: Pod "downwardapi-volume-416e8632-919b-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012513004s +STEP: Saw pod success +Jun 18 07:32:45.608: INFO: Pod "downwardapi-volume-416e8632-919b-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:32:45.615: INFO: Trying to get logs from node node5 pod downwardapi-volume-416e8632-919b-11e9-bbf5-0e74dabf3615 container client-container: +STEP: delete the pod +Jun 18 07:32:45.632: INFO: Waiting for pod downwardapi-volume-416e8632-919b-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:32:45.633: INFO: Pod downwardapi-volume-416e8632-919b-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:32:45.633: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-k759k" for this suite. 
+Jun 18 07:32:51.650: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:32:51.714: INFO: namespace: e2e-tests-downward-api-k759k, resource: bindings, ignored listing per whitelist +Jun 18 07:32:52.527: INFO: namespace e2e-tests-downward-api-k759k deletion completed in 6.890509148s + +• [SLOW TEST:12.012 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:32:52.528: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-dl6np +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name 
secret-test-map-481815ac-919b-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume secrets +Jun 18 07:32:52.708: INFO: Waiting up to 5m0s for pod "pod-secrets-481888c6-919b-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-secrets-dl6np" to be "success or failure" +Jun 18 07:32:52.710: INFO: Pod "pod-secrets-481888c6-919b-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.528544ms +Jun 18 07:32:54.720: INFO: Pod "pod-secrets-481888c6-919b-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012294937s +Jun 18 07:32:56.729: INFO: Pod "pod-secrets-481888c6-919b-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020670167s +STEP: Saw pod success +Jun 18 07:32:56.729: INFO: Pod "pod-secrets-481888c6-919b-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:32:56.731: INFO: Trying to get logs from node node5 pod pod-secrets-481888c6-919b-11e9-bbf5-0e74dabf3615 container secret-volume-test: +STEP: delete the pod +Jun 18 07:32:56.765: INFO: Waiting for pod pod-secrets-481888c6-919b-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:32:56.767: INFO: Pod pod-secrets-481888c6-919b-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:32:56.767: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-dl6np" for this suite. 
+Jun 18 07:33:05.514: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:33:05.629: INFO: namespace: e2e-tests-secrets-dl6np, resource: bindings, ignored listing per whitelist +Jun 18 07:33:05.826: INFO: namespace e2e-tests-secrets-dl6np deletion completed in 9.056049984s + +• [SLOW TEST:13.299 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Docker Containers + should be able to override the image's default command and arguments [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Docker Containers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:33:05.827: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename containers +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-containers-275r6 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to override the image's default command and arguments [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: 
Creating a pod to test override all +Jun 18 07:33:06.696: INFO: Waiting up to 5m0s for pod "client-containers-506e560b-919b-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-containers-275r6" to be "success or failure" +Jun 18 07:33:06.698: INFO: Pod "client-containers-506e560b-919b-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 1.863985ms +Jun 18 07:33:08.700: INFO: Pod "client-containers-506e560b-919b-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.004277233s +Jun 18 07:33:10.703: INFO: Pod "client-containers-506e560b-919b-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 4.007216775s +Jun 18 07:33:12.706: INFO: Pod "client-containers-506e560b-919b-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.010263462s +STEP: Saw pod success +Jun 18 07:33:12.706: INFO: Pod "client-containers-506e560b-919b-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:33:12.708: INFO: Trying to get logs from node node5 pod client-containers-506e560b-919b-11e9-bbf5-0e74dabf3615 container test-container: +STEP: delete the pod +Jun 18 07:33:12.723: INFO: Waiting for pod client-containers-506e560b-919b-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:33:12.725: INFO: Pod client-containers-506e560b-919b-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [k8s.io] Docker Containers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:33:12.725: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-containers-275r6" for this suite. 
+Jun 18 07:33:20.737: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:33:20.840: INFO: namespace: e2e-tests-containers-275r6, resource: bindings, ignored listing per whitelist +Jun 18 07:33:21.525: INFO: namespace e2e-tests-containers-275r6 deletion completed in 8.795793549s + +• [SLOW TEST:15.698 seconds] +[k8s.io] Docker Containers +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be able to override the image's default command and arguments [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-storage] ConfigMap + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:33:21.525: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-w4tbg +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name 
configmap-test-volume-5962f1b5-919b-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume configMaps +Jun 18 07:33:21.720: INFO: Waiting up to 5m0s for pod "pod-configmaps-59637475-919b-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-configmap-w4tbg" to be "success or failure" +Jun 18 07:33:21.723: INFO: Pod "pod-configmaps-59637475-919b-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.26553ms +Jun 18 07:33:23.725: INFO: Pod "pod-configmaps-59637475-919b-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.004731122s +STEP: Saw pod success +Jun 18 07:33:23.725: INFO: Pod "pod-configmaps-59637475-919b-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:33:23.728: INFO: Trying to get logs from node node5 pod pod-configmaps-59637475-919b-11e9-bbf5-0e74dabf3615 container configmap-volume-test: +STEP: delete the pod +Jun 18 07:33:23.742: INFO: Waiting for pod pod-configmaps-59637475-919b-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:33:23.744: INFO: Pod pod-configmaps-59637475-919b-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:33:23.744: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-w4tbg" for this suite. 
+Jun 18 07:33:29.759: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:33:30.605: INFO: namespace: e2e-tests-configmap-w4tbg, resource: bindings, ignored listing per whitelist +Jun 18 07:33:30.653: INFO: namespace e2e-tests-configmap-w4tbg deletion completed in 6.906173741s + +• [SLOW TEST:9.128 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[k8s.io] Pods + should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:33:30.653: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename pods +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-mkr46 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: updating the pod +Jun 18 07:33:34.025: INFO: Successfully updated pod "pod-update-activedeadlineseconds-5ed1ea5b-919b-11e9-bbf5-0e74dabf3615" +Jun 18 07:33:34.025: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-5ed1ea5b-919b-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-pods-mkr46" to be "terminated due to deadline exceeded" +Jun 18 07:33:34.027: INFO: Pod "pod-update-activedeadlineseconds-5ed1ea5b-919b-11e9-bbf5-0e74dabf3615": Phase="Running", Reason="", readiness=true. Elapsed: 2.034542ms +Jun 18 07:33:36.512: INFO: Pod "pod-update-activedeadlineseconds-5ed1ea5b-919b-11e9-bbf5-0e74dabf3615": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 2.486155544s +Jun 18 07:33:36.512: INFO: Pod "pod-update-activedeadlineseconds-5ed1ea5b-919b-11e9-bbf5-0e74dabf3615" satisfied condition "terminated due to deadline exceeded" +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:33:36.512: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-mkr46" for this suite. 
+Jun 18 07:33:46.522: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:33:46.624: INFO: namespace: e2e-tests-pods-mkr46, resource: bindings, ignored listing per whitelist +Jun 18 07:33:47.531: INFO: namespace e2e-tests-pods-mkr46 deletion completed in 11.015758422s + +• [SLOW TEST:16.878 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-network] DNS + should provide DNS for services [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] DNS + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:33:47.531: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename dns +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-dns-5vvfd +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for services [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a test headless service +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > 
/results/wheezy_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-5vvfd A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.e2e-tests-dns-5vvfd;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-5vvfd A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.e2e-tests-dns-5vvfd;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-5vvfd.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.e2e-tests-dns-5vvfd.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-5vvfd.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc;podARec=$$(hostname -i| awk -F. 
'{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-5vvfd.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 36.246.169.169.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/169.169.246.36_udp@PTR;check="$$(dig +tcp +noall +answer +search 36.246.169.169.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/169.169.246.36_tcp@PTR;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-5vvfd A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.e2e-tests-dns-5vvfd;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-5vvfd A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-5vvfd.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-5vvfd.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc SRV)" && test -n 
"$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-5vvfd.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 36.246.169.169.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/169.169.246.36_udp@PTR;check="$$(dig +tcp +noall +answer +search 36.246.169.169.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/169.169.246.36_tcp@PTR;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Jun 18 07:33:52.600: INFO: Unable to read wheezy_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.617: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.620: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.623: INFO: Unable to read wheezy_udp@_http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.626: INFO: Unable to read wheezy_tcp@_http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.629: INFO: Unable to read wheezy_udp@PodARecord from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.631: INFO: Unable to read wheezy_tcp@PodARecord from pod 
e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.638: INFO: Unable to read jessie_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.640: INFO: Unable to read jessie_tcp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.643: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.645: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.650: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.653: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:52.677: INFO: Lookups using e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615 failed for: [wheezy_udp@dns-test-service wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc 
wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc wheezy_udp@_http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc wheezy_tcp@_http._tcp.test-service-2.e2e-tests-dns-5vvfd.svc wheezy_udp@PodARecord wheezy_tcp@PodARecord jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.e2e-tests-dns-5vvfd jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc] + +Jun 18 07:33:57.679: INFO: Unable to read wheezy_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:57.702: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:57.720: INFO: Unable to read jessie_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:57.722: INFO: Unable to read jessie_tcp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:57.726: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:57.729: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not 
find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:57.731: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:57.733: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:33:57.755: INFO: Lookups using e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615 failed for: [wheezy_udp@dns-test-service wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-5vvfd.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.e2e-tests-dns-5vvfd jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc] + +Jun 18 07:34:02.681: INFO: Unable to read wheezy_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:02.721: INFO: Unable to read jessie_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:02.723: INFO: Unable to read jessie_tcp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:02.726: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd from pod 
e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:02.729: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:02.731: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:02.734: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:02.757: INFO: Lookups using e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615 failed for: [wheezy_udp@dns-test-service jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.e2e-tests-dns-5vvfd jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc] + +Jun 18 07:34:07.679: INFO: Unable to read wheezy_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:07.724: INFO: Unable to read jessie_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:07.727: INFO: Unable to read jessie_tcp@dns-test-service from pod 
e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:07.729: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:07.731: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:07.733: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:07.735: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:07.755: INFO: Lookups using e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615 failed for: [wheezy_udp@dns-test-service jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.e2e-tests-dns-5vvfd jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc] + +Jun 18 07:34:12.680: INFO: Unable to read wheezy_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:12.720: INFO: Unable to read 
jessie_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:12.722: INFO: Unable to read jessie_tcp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:12.727: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:12.736: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:12.740: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:12.743: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:12.771: INFO: Lookups using e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615 failed for: [wheezy_udp@dns-test-service jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.e2e-tests-dns-5vvfd jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc] + +Jun 18 07:34:17.690: INFO: 
Unable to read wheezy_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:17.745: INFO: Unable to read jessie_udp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:17.749: INFO: Unable to read jessie_tcp@dns-test-service from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:17.751: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:17.753: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:17.757: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:17.762: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc from pod e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615: the server could not find the requested resource (get pods dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615) +Jun 18 07:34:18.541: INFO: Lookups using e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615 failed for: [wheezy_udp@dns-test-service 
jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.e2e-tests-dns-5vvfd jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd jessie_udp@dns-test-service.e2e-tests-dns-5vvfd.svc jessie_tcp@dns-test-service.e2e-tests-dns-5vvfd.svc] + +Jun 18 07:34:22.751: INFO: DNS probes using e2e-tests-dns-5vvfd/dns-test-6960f83c-919b-11e9-bbf5-0e74dabf3615 succeeded + +STEP: deleting the pod +STEP: deleting the test service +STEP: deleting the test headless service +[AfterEach] [sig-network] DNS + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:34:22.793: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-dns-5vvfd" for this suite. +Jun 18 07:34:30.808: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:34:30.833: INFO: namespace: e2e-tests-dns-5vvfd, resource: bindings, ignored listing per whitelist +Jun 18 07:34:31.614: INFO: namespace e2e-tests-dns-5vvfd deletion completed in 8.81702647s + +• [SLOW TEST:44.083 seconds] +[sig-network] DNS +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + should provide DNS for services [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class + should be submitted and removed [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] [sig-node] Pods Extended + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:34:31.614: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename pods +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-tbfvf +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods Set QOS Class + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pods.go:204 +[It] should be submitted and removed [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying QOS class is set on the pod +[AfterEach] [k8s.io] [sig-node] Pods Extended + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:34:31.789: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-tbfvf" for this suite. 
+Jun 18 07:34:57.801: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:34:58.516: INFO: namespace: e2e-tests-pods-tbfvf, resource: bindings, ignored listing per whitelist +Jun 18 07:34:58.712: INFO: namespace e2e-tests-pods-tbfvf deletion completed in 26.920106982s + +• [SLOW TEST:27.098 seconds] +[k8s.io] [sig-node] Pods Extended +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + [k8s.io] Pods Set QOS Class + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be submitted and removed [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-storage] Projected secret + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:34:58.713: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-rq2rm +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name s-test-opt-del-93c88931-919b-11e9-bbf5-0e74dabf3615 +STEP: Creating secret with name s-test-opt-upd-93c88994-919b-11e9-bbf5-0e74dabf3615 +STEP: Creating the pod +STEP: Deleting secret s-test-opt-del-93c88931-919b-11e9-bbf5-0e74dabf3615 +STEP: Updating secret s-test-opt-upd-93c88994-919b-11e9-bbf5-0e74dabf3615 +STEP: Creating secret with name s-test-opt-create-93c889b3-919b-11e9-bbf5-0e74dabf3615 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:36:20.577: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-rq2rm" for this suite. +Jun 18 07:36:52.590: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:36:52.627: INFO: namespace: e2e-tests-projected-rq2rm, resource: bindings, ignored listing per whitelist +Jun 18 07:36:53.514: INFO: namespace e2e-tests-projected-rq2rm deletion completed in 32.932747237s + +• [SLOW TEST:114.801 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSS +------------------------------ +[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod + should be possible to delete [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:36:53.514: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubelet-test +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubelet-test-g9thz +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37 +[BeforeEach] when scheduling a busybox command that always fails in a pod + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81 +[It] should be possible to delete [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[AfterEach] [k8s.io] Kubelet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:36:53.733: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubelet-test-g9thz" for this suite. 
+Jun 18 07:37:01.745: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:37:01.829: INFO: namespace: e2e-tests-kubelet-test-g9thz, resource: bindings, ignored listing per whitelist +Jun 18 07:37:02.055: INFO: namespace e2e-tests-kubelet-test-g9thz deletion completed in 8.319163262s + +• [SLOW TEST:8.541 seconds] +[k8s.io] Kubelet +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when scheduling a busybox command that always fails in a pod + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78 + should be possible to delete [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Downward API + should provide pod UID as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-node] Downward API + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:37:02.056: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-57flt +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide pod UID as env vars [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward api env vars +Jun 18 07:37:02.667: INFO: Waiting up to 5m0s for pod "downward-api-dd150624-919b-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-57flt" to be "success or failure" +Jun 18 07:37:02.672: INFO: Pod "downward-api-dd150624-919b-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 4.705273ms +Jun 18 07:37:04.681: INFO: Pod "downward-api-dd150624-919b-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014167065s +Jun 18 07:37:06.684: INFO: Pod "downward-api-dd150624-919b-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016823545s +STEP: Saw pod success +Jun 18 07:37:06.684: INFO: Pod "downward-api-dd150624-919b-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:37:06.686: INFO: Trying to get logs from node node5 pod downward-api-dd150624-919b-11e9-bbf5-0e74dabf3615 container dapi-container: +STEP: delete the pod +Jun 18 07:37:06.711: INFO: Waiting for pod downward-api-dd150624-919b-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:37:06.715: INFO: Pod downward-api-dd150624-919b-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-node] Downward API + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:37:06.715: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-57flt" for this suite. 
+Jun 18 07:37:15.551: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:37:15.621: INFO: namespace: e2e-tests-downward-api-57flt, resource: bindings, ignored listing per whitelist +Jun 18 07:37:16.512: INFO: namespace e2e-tests-downward-api-57flt deletion completed in 9.793164265s + +• [SLOW TEST:14.456 seconds] +[sig-node] Downward API +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38 + should provide pod UID as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-storage] Downward API volume + should update labels on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:37:16.512: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-wcrjj +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should update labels on modification [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating the pod +Jun 18 07:37:19.515: INFO: Successfully updated pod "labelsupdatee573933f-919b-11e9-bbf5-0e74dabf3615" +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:37:23.554: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-wcrjj" for this suite. +Jun 18 07:37:47.570: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:37:47.692: INFO: namespace: e2e-tests-downward-api-wcrjj, resource: bindings, ignored listing per whitelist +Jun 18 07:37:48.546: INFO: namespace e2e-tests-downward-api-wcrjj deletion completed in 24.987428331s + +• [SLOW TEST:32.034 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should update labels on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSS +------------------------------ +[k8s.io] Kubelet when scheduling a busybox Pod with hostAliases + should write entries to /etc/hosts [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a 
kubernetes client +Jun 18 07:37:48.546: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubelet-test +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubelet-test-f52d9 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37 +[It] should write entries to /etc/hosts [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[AfterEach] [k8s.io] Kubelet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:37:53.736: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubelet-test-f52d9" for this suite. 
+Jun 18 07:38:35.753: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:38:35.855: INFO: namespace: e2e-tests-kubelet-test-f52d9, resource: bindings, ignored listing per whitelist +Jun 18 07:38:36.065: INFO: namespace e2e-tests-kubelet-test-f52d9 deletion completed in 42.324293238s + +• [SLOW TEST:47.518 seconds] +[k8s.io] Kubelet +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when scheduling a busybox Pod with hostAliases + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:136 + should write entries to /etc/hosts [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSS +------------------------------ +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:38:36.065: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-lifecycle-hook-6xdgd +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61 +STEP: create the container to handle the HTTPGet hook request. +[It] should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the pod with lifecycle hook +STEP: delete the pod with lifecycle hook +Jun 18 07:38:42.657: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Jun 18 07:38:42.660: INFO: Pod pod-with-prestop-http-hook still exists +Jun 18 07:38:44.661: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Jun 18 07:38:44.664: INFO: Pod pod-with-prestop-http-hook still exists +Jun 18 07:38:46.661: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Jun 18 07:38:46.663: INFO: Pod pod-with-prestop-http-hook no longer exists +STEP: check prestop hook +[AfterEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:38:46.676: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-6xdgd" for this suite. 
+Jun 18 07:39:10.693: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:39:11.573: INFO: namespace: e2e-tests-container-lifecycle-hook-6xdgd, resource: bindings, ignored listing per whitelist +Jun 18 07:39:11.594: INFO: namespace e2e-tests-container-lifecycle-hook-6xdgd deletion completed in 24.913329724s + +• [SLOW TEST:35.529 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when create a pod with lifecycle hook + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40 + should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-node] Downward API + should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-node] Downward API + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:39:11.594: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-c7mjb +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] 
[Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward api env vars +Jun 18 07:39:12.528: INFO: Waiting up to 5m0s for pod "downward-api-2a7b8063-919c-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-c7mjb" to be "success or failure" +Jun 18 07:39:12.532: INFO: Pod "downward-api-2a7b8063-919c-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 4.077836ms +Jun 18 07:39:14.535: INFO: Pod "downward-api-2a7b8063-919c-11e9-bbf5-0e74dabf3615": Phase="Running", Reason="", readiness=true. Elapsed: 2.006217462s +Jun 18 07:39:16.539: INFO: Pod "downward-api-2a7b8063-919c-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01052826s +STEP: Saw pod success +Jun 18 07:39:16.539: INFO: Pod "downward-api-2a7b8063-919c-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:39:16.541: INFO: Trying to get logs from node node5 pod downward-api-2a7b8063-919c-11e9-bbf5-0e74dabf3615 container dapi-container: +STEP: delete the pod +Jun 18 07:39:16.559: INFO: Waiting for pod downward-api-2a7b8063-919c-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:39:16.561: INFO: Pod downward-api-2a7b8063-919c-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-node] Downward API + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:39:16.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-c7mjb" for this suite. 
+Jun 18 07:39:24.578: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:39:24.655: INFO: namespace: e2e-tests-downward-api-c7mjb, resource: bindings, ignored listing per whitelist +Jun 18 07:39:24.891: INFO: namespace e2e-tests-downward-api-c7mjb deletion completed in 8.325800072s + +• [SLOW TEST:13.297 seconds] +[sig-node] Downward API +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38 + should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSS +------------------------------ +[sig-network] Networking Granular Checks: Pods + should function for intra-pod communication: udp [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Networking + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:39:24.891: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename pod-network-test +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pod-network-test-zw7z4 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for intra-pod communication: udp [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: 
Performing setup for networking test in namespace e2e-tests-pod-network-test-zw7z4 +STEP: creating a selector +STEP: Creating the service pods in kubernetes +Jun 18 07:39:25.638: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +STEP: Creating test pods +Jun 18 07:39:48.543: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://171.171.33.160:8080/dial?request=hostName&protocol=udp&host=171.171.104.61&port=8081&tries=1'] Namespace:e2e-tests-pod-network-test-zw7z4 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:39:48.544: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:39:48.644: INFO: Waiting for endpoints: map[] +Jun 18 07:39:48.653: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://171.171.33.160:8080/dial?request=hostName&protocol=udp&host=171.171.3.103&port=8081&tries=1'] Namespace:e2e-tests-pod-network-test-zw7z4 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:39:48.653: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:39:49.518: INFO: Waiting for endpoints: map[] +Jun 18 07:39:49.522: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://171.171.33.160:8080/dial?request=hostName&protocol=udp&host=171.171.135.3&port=8081&tries=1'] Namespace:e2e-tests-pod-network-test-zw7z4 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:39:49.522: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:39:49.608: INFO: Waiting for endpoints: map[] +Jun 18 07:39:49.610: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://171.171.33.160:8080/dial?request=hostName&protocol=udp&host=171.171.33.167&port=8081&tries=1'] Namespace:e2e-tests-pod-network-test-zw7z4 PodName:host-test-container-pod ContainerName:hostexec Stdin: 
CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:39:49.610: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:39:49.682: INFO: Waiting for endpoints: map[] +Jun 18 07:39:49.684: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://171.171.33.160:8080/dial?request=hostName&protocol=udp&host=171.171.166.138&port=8081&tries=1'] Namespace:e2e-tests-pod-network-test-zw7z4 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +Jun 18 07:39:49.684: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +Jun 18 07:39:49.754: INFO: Waiting for endpoints: map[] +[AfterEach] [sig-network] Networking + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:39:49.754: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pod-network-test-zw7z4" for this suite. 
+Jun 18 07:40:13.767: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:40:14.549: INFO: namespace: e2e-tests-pod-network-test-zw7z4, resource: bindings, ignored listing per whitelist +Jun 18 07:40:14.578: INFO: namespace e2e-tests-pod-network-test-zw7z4 deletion completed in 24.818664097s + +• [SLOW TEST:49.687 seconds] +[sig-network] Networking +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25 + Granular Checks: Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28 + should function for intra-pod communication: udp [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[k8s.io] InitContainer [NodeConformance] + should invoke init containers on a RestartAlways pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:40:14.578: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename init-container +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-init-container-r2tj4 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43 +[It] should invoke init containers on a RestartAlways pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +Jun 18 07:40:14.749: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:40:20.522: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-init-container-r2tj4" for this suite. +Jun 18 07:40:46.537: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:40:46.623: INFO: namespace: e2e-tests-init-container-r2tj4, resource: bindings, ignored listing per whitelist +Jun 18 07:40:47.512: INFO: namespace e2e-tests-init-container-r2tj4 deletion completed in 26.986048856s + +• [SLOW TEST:32.934 seconds] +[k8s.io] InitContainer [NodeConformance] +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should invoke init containers on a RestartAlways pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod + should have an terminated reason [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:40:47.513: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubelet-test +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubelet-test-85jqh +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37 +[BeforeEach] when scheduling a busybox command that always fails in a pod + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81 +[It] should have an terminated reason [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[AfterEach] [k8s.io] Kubelet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:40:52.534: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubelet-test-85jqh" for this suite. 
+Jun 18 07:41:02.549: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:41:02.584: INFO: namespace: e2e-tests-kubelet-test-85jqh, resource: bindings, ignored listing per whitelist +Jun 18 07:41:03.530: INFO: namespace e2e-tests-kubelet-test-85jqh deletion completed in 10.992242919s + +• [SLOW TEST:16.017 seconds] +[k8s.io] Kubelet +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when scheduling a busybox command that always fails in a pod + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78 + should have an terminated reason [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + Should recreate evicted statefulset [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:41:03.530: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename statefulset +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-statefulset-6r7nw +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace e2e-tests-statefulset-6r7nw +[It] Should recreate evicted statefulset [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Looking for a node to schedule stateful set and pod +STEP: Creating pod with conflicting port in namespace e2e-tests-statefulset-6r7nw +STEP: Creating statefulset with conflicting port in namespace e2e-tests-statefulset-6r7nw +STEP: Waiting until pod test-pod will start running in namespace e2e-tests-statefulset-6r7nw +STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace e2e-tests-statefulset-6r7nw +Jun 18 07:41:10.604: INFO: Observed stateful pod in namespace: e2e-tests-statefulset-6r7nw, name: ss-0, uid: 70d46bdb-919c-11e9-8cfd-00163e000a67, status phase: Pending. Waiting for statefulset controller to delete. +Jun 18 07:41:12.543: INFO: Observed stateful pod in namespace: e2e-tests-statefulset-6r7nw, name: ss-0, uid: 70d46bdb-919c-11e9-8cfd-00163e000a67, status phase: Failed. Waiting for statefulset controller to delete. +Jun 18 07:41:12.559: INFO: Observed stateful pod in namespace: e2e-tests-statefulset-6r7nw, name: ss-0, uid: 70d46bdb-919c-11e9-8cfd-00163e000a67, status phase: Failed. Waiting for statefulset controller to delete. 
+Jun 18 07:41:12.584: INFO: Observed delete event for stateful pod ss-0 in namespace e2e-tests-statefulset-6r7nw +STEP: Removing pod with conflicting port in namespace e2e-tests-statefulset-6r7nw +STEP: Waiting when stateful pod ss-0 will be recreated in namespace e2e-tests-statefulset-6r7nw and will be in running state +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +Jun 18 07:41:22.727: INFO: Deleting all statefulset in ns e2e-tests-statefulset-6r7nw +Jun 18 07:41:23.530: INFO: Scaling statefulset ss to 0 +Jun 18 07:41:44.613: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 18 07:41:44.619: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:41:44.640: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-statefulset-6r7nw" for this suite. 
+Jun 18 07:41:54.658: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:41:54.705: INFO: namespace: e2e-tests-statefulset-6r7nw, resource: bindings, ignored listing per whitelist +Jun 18 07:41:54.969: INFO: namespace e2e-tests-statefulset-6r7nw deletion completed in 10.32535426s + +• [SLOW TEST:51.439 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + Should recreate evicted statefulset [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir wrapper volumes + should not conflict [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir wrapper volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:41:54.969: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename emptydir-wrapper +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-wrapper-wcnhz +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not conflict [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Cleaning up the secret +STEP: Cleaning up the configmap +STEP: Cleaning up the pod +[AfterEach] [sig-storage] EmptyDir wrapper volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:41:57.672: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-wrapper-wcnhz" for this suite. +Jun 18 07:42:05.693: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:42:06.545: INFO: namespace: e2e-tests-emptydir-wrapper-wcnhz, resource: bindings, ignored listing per whitelist +Jun 18 07:42:06.567: INFO: namespace e2e-tests-emptydir-wrapper-wcnhz deletion completed in 8.891950405s + +• [SLOW TEST:11.598 seconds] +[sig-storage] EmptyDir wrapper volumes +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + should not conflict [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl expose + should create services for rc [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:42:06.567: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: 
Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-kj5jb +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should create services for rc [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating Redis RC +Jun 18 07:42:07.515: INFO: namespace e2e-tests-kubectl-kj5jb +Jun 18 07:42:07.515: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-kj5jb' +Jun 18 07:42:09.589: INFO: stderr: "" +Jun 18 07:42:09.589: INFO: stdout: "replicationcontroller/redis-master created\n" +STEP: Waiting for Redis master to start. +Jun 18 07:42:10.592: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:42:10.592: INFO: Found 0 / 1 +Jun 18 07:42:11.597: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:42:11.597: INFO: Found 0 / 1 +Jun 18 07:42:12.592: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:42:12.592: INFO: Found 1 / 1 +Jun 18 07:42:12.592: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +Jun 18 07:42:12.594: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:42:12.594: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +Jun 18 07:42:12.594: INFO: wait on redis-master startup in e2e-tests-kubectl-kj5jb +Jun 18 07:42:12.594: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 logs redis-master-rcf6k redis-master --namespace=e2e-tests-kubectl-kj5jb' +Jun 18 07:42:12.715: INFO: stderr: "" +Jun 18 07:42:12.715: INFO: stdout: " _._ \n _.-``__ ''-._ \n _.-`` `. `_. 
''-._ Redis 3.2.12 (35a5711f/0) 64 bit\n .-`` .-```. ```\\/ _.,_ ''-._ \n ( ' , .-` | `, ) Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'| Port: 6379\n | `-._ `._ / _.-' | PID: 1\n `-._ `-._ `-./ _.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | http://redis.io \n `-._ `-._`-.__.-'_.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | \n `-._ `-._`-.__.-'_.-' _.-' \n `-._ `-.__.-' _.-' \n `-._ _.-' \n `-.__.-' \n\n1:M 18 Jun 07:42:11.787 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 18 Jun 07:42:11.787 # Server started, Redis version 3.2.12\n1:M 18 Jun 07:42:11.787 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 18 Jun 07:42:11.787 * The server is now ready to accept connections on port 6379\n" +STEP: exposing RC +Jun 18 07:42:12.716: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 expose rc redis-master --name=rm2 --port=1234 --target-port=6379 --namespace=e2e-tests-kubectl-kj5jb' +Jun 18 07:42:12.812: INFO: stderr: "" +Jun 18 07:42:12.812: INFO: stdout: "service/rm2 exposed\n" +Jun 18 07:42:12.815: INFO: Service rm2 in namespace e2e-tests-kubectl-kj5jb found. +STEP: exposing service +Jun 18 07:42:14.820: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 expose service rm2 --name=rm3 --port=2345 --target-port=6379 --namespace=e2e-tests-kubectl-kj5jb' +Jun 18 07:42:14.910: INFO: stderr: "" +Jun 18 07:42:14.910: INFO: stdout: "service/rm3 exposed\n" +Jun 18 07:42:14.915: INFO: Service rm3 in namespace e2e-tests-kubectl-kj5jb found. 
+[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:42:16.920: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-kj5jb" for this suite. +Jun 18 07:42:43.547: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:42:44.524: INFO: namespace: e2e-tests-kubectl-kj5jb, resource: bindings, ignored listing per whitelist +Jun 18 07:42:44.558: INFO: namespace e2e-tests-kubectl-kj5jb deletion completed in 27.634672083s + +• [SLOW TEST:37.991 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl expose + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create services for rc [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:42:44.558: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename 
projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-c24fp +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating projection with secret that has name projected-secret-test-a8fb3d36-919c-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume secrets +Jun 18 07:42:45.526: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-a970660d-919c-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-c24fp" to be "success or failure" +Jun 18 07:42:45.532: INFO: Pod "pod-projected-secrets-a970660d-919c-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 6.394413ms +Jun 18 07:42:47.557: INFO: Pod "pod-projected-secrets-a970660d-919c-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.031256471s +STEP: Saw pod success +Jun 18 07:42:47.557: INFO: Pod "pod-projected-secrets-a970660d-919c-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:42:47.564: INFO: Trying to get logs from node node5 pod pod-projected-secrets-a970660d-919c-11e9-bbf5-0e74dabf3615 container projected-secret-volume-test: +STEP: delete the pod +Jun 18 07:42:47.589: INFO: Waiting for pod pod-projected-secrets-a970660d-919c-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:42:47.592: INFO: Pod pod-projected-secrets-a970660d-919c-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:42:47.592: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-c24fp" for this suite. +Jun 18 07:42:55.609: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:42:55.685: INFO: namespace: e2e-tests-projected-c24fp, resource: bindings, ignored listing per whitelist +Jun 18 07:42:55.923: INFO: namespace e2e-tests-projected-c24fp deletion completed in 8.327062211s + +• [SLOW TEST:11.364 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34 + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl logs + should be able to retrieve and filter logs [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:42:55.923: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-76htn +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl logs + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1134 +STEP: creating an rc +Jun 18 07:42:56.627: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-76htn' +Jun 18 07:42:57.516: INFO: stderr: "" +Jun 18 07:42:57.516: INFO: stdout: "replicationcontroller/redis-master created\n" +[It] should be able to retrieve and filter logs [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Waiting for Redis master to start. +Jun 18 07:42:58.528: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:42:58.528: INFO: Found 0 / 1 +Jun 18 07:42:59.521: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:42:59.521: INFO: Found 1 / 1 +Jun 18 07:42:59.521: INFO: WaitFor completed with timeout 5m0s. 
Pods found = 1 out of 1 +Jun 18 07:42:59.525: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:42:59.525: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +STEP: checking for a matching strings +Jun 18 07:42:59.525: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 logs redis-master-5ckwl redis-master --namespace=e2e-tests-kubectl-76htn' +Jun 18 07:42:59.612: INFO: stderr: "" +Jun 18 07:42:59.612: INFO: stdout: " _._ \n _.-``__ ''-._ \n _.-`` `. `_. ''-._ Redis 3.2.12 (35a5711f/0) 64 bit\n .-`` .-```. ```\\/ _.,_ ''-._ \n ( ' , .-` | `, ) Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'| Port: 6379\n | `-._ `._ / _.-' | PID: 1\n `-._ `-._ `-./ _.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | http://redis.io \n `-._ `-._`-.__.-'_.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | \n `-._ `-._`-.__.-'_.-' _.-' \n `-._ `-.__.-' _.-' \n `-._ _.-' \n `-.__.-' \n\n1:M 18 Jun 07:42:58.798 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 18 Jun 07:42:58.798 # Server started, Redis version 3.2.12\n1:M 18 Jun 07:42:58.798 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. 
Redis must be restarted after THP is disabled.\n1:M 18 Jun 07:42:58.798 * The server is now ready to accept connections on port 6379\n" +STEP: limiting log lines +Jun 18 07:42:59.612: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 log redis-master-5ckwl redis-master --namespace=e2e-tests-kubectl-76htn --tail=1' +Jun 18 07:42:59.698: INFO: stderr: "" +Jun 18 07:42:59.698: INFO: stdout: "1:M 18 Jun 07:42:58.798 * The server is now ready to accept connections on port 6379\n" +STEP: limiting log bytes +Jun 18 07:42:59.698: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 log redis-master-5ckwl redis-master --namespace=e2e-tests-kubectl-76htn --limit-bytes=1' +Jun 18 07:42:59.785: INFO: stderr: "" +Jun 18 07:42:59.785: INFO: stdout: " " +STEP: exposing timestamps +Jun 18 07:42:59.785: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 log redis-master-5ckwl redis-master --namespace=e2e-tests-kubectl-76htn --tail=1 --timestamps' +Jun 18 07:42:59.872: INFO: stderr: "" +Jun 18 07:42:59.872: INFO: stdout: "2019-06-18T07:42:58.79909267Z 1:M 18 Jun 07:42:58.798 * The server is now ready to accept connections on port 6379\n" +STEP: restricting to a time range +Jun 18 07:43:02.373: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 log redis-master-5ckwl redis-master --namespace=e2e-tests-kubectl-76htn --since=1s' +Jun 18 07:43:02.535: INFO: stderr: "" +Jun 18 07:43:02.535: INFO: stdout: "" +Jun 18 07:43:02.535: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 log redis-master-5ckwl redis-master --namespace=e2e-tests-kubectl-76htn --since=24h' +Jun 18 07:43:02.628: INFO: stderr: "" +Jun 18 07:43:02.628: INFO: stdout: " _._ \n _.-``__ ''-._ \n _.-`` `. `_. ''-._ Redis 3.2.12 (35a5711f/0) 64 bit\n .-`` .-```. 
```\\/ _.,_ ''-._ \n ( ' , .-` | `, ) Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'| Port: 6379\n | `-._ `._ / _.-' | PID: 1\n `-._ `-._ `-./ _.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | http://redis.io \n `-._ `-._`-.__.-'_.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | \n `-._ `-._`-.__.-'_.-' _.-' \n `-._ `-.__.-' _.-' \n `-._ _.-' \n `-.__.-' \n\n1:M 18 Jun 07:42:58.798 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 18 Jun 07:42:58.798 # Server started, Redis version 3.2.12\n1:M 18 Jun 07:42:58.798 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 18 Jun 07:42:58.798 * The server is now ready to accept connections on port 6379\n" +[AfterEach] [k8s.io] Kubectl logs + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1140 +STEP: using delete to clean up resources +Jun 18 07:43:02.628: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-76htn' +Jun 18 07:43:02.707: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +Jun 18 07:43:02.707: INFO: stdout: "replicationcontroller \"redis-master\" force deleted\n" +Jun 18 07:43:02.707: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get rc,svc -l name=nginx --no-headers --namespace=e2e-tests-kubectl-76htn' +Jun 18 07:43:02.795: INFO: stderr: "No resources found.\n" +Jun 18 07:43:02.795: INFO: stdout: "" +Jun 18 07:43:02.795: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -l name=nginx --namespace=e2e-tests-kubectl-76htn -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Jun 18 07:43:03.534: INFO: stderr: "" +Jun 18 07:43:03.534: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:43:03.534: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-76htn" for this suite. 
+Jun 18 07:43:11.555: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:43:11.617: INFO: namespace: e2e-tests-kubectl-76htn, resource: bindings, ignored listing per whitelist +Jun 18 07:43:11.872: INFO: namespace e2e-tests-kubectl-76htn deletion completed in 8.32928312s + +• [SLOW TEST:15.949 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl logs + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be able to retrieve and filter logs [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl version + should check is all data is printed [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:43:11.872: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-wszjb +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] 
should check is all data is printed [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +Jun 18 07:43:12.521: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 version' +Jun 18 07:43:12.595: INFO: stderr: "" +Jun 18 07:43:12.595: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"13\", GitVersion:\"v1.13.5\", GitCommit:\"2166946f41b36dea2c4626f90a77706f426cdea2\", GitTreeState:\"clean\", BuildDate:\"2019-03-25T15:26:52Z\", GoVersion:\"go1.11.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nServer Version: version.Info{Major:\"1\", Minor:\"13\", GitVersion:\"v1.13.5\", GitCommit:\"2166946f41b36dea2c4626f90a77706f426cdea2\", GitTreeState:\"clean\", BuildDate:\"2019-03-25T15:19:22Z\", GoVersion:\"go1.11.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:43:12.595: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-wszjb" for this suite. 
+Jun 18 07:43:20.613: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:43:20.716: INFO: namespace: e2e-tests-kubectl-wszjb, resource: bindings, ignored listing per whitelist +Jun 18 07:43:21.560: INFO: namespace e2e-tests-kubectl-wszjb deletion completed in 8.961340496s + +• [SLOW TEST:9.688 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl version + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should check is all data is printed [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-api-machinery] Namespaces [Serial] + should ensure that all services are removed when a namespace is deleted [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:43:21.560: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename namespaces +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-namespaces-fc468 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should ensure that all services are removed when a namespace is deleted [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a test namespace +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-nsdeletetest-cscj7 +STEP: Waiting for a default service account to be provisioned in namespace +STEP: Creating a service in the namespace +STEP: Deleting the namespace +STEP: Waiting for the namespace to be removed. +STEP: Recreating the namespace +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-nsdeletetest-mpzqn +STEP: Verifying there is no service in the namespace +[AfterEach] [sig-api-machinery] Namespaces [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:43:30.831: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-namespaces-fc468" for this suite. +Jun 18 07:43:39.525: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:43:39.560: INFO: namespace: e2e-tests-namespaces-fc468, resource: bindings, ignored listing per whitelist +Jun 18 07:43:39.845: INFO: namespace e2e-tests-namespaces-fc468 deletion completed in 9.010793608s +STEP: Destroying namespace "e2e-tests-nsdeletetest-cscj7" for this suite. +Jun 18 07:43:39.848: INFO: Namespace e2e-tests-nsdeletetest-cscj7 was already deleted +STEP: Destroying namespace "e2e-tests-nsdeletetest-mpzqn" for this suite. 
+Jun 18 07:43:47.857: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:43:48.661: INFO: namespace: e2e-tests-nsdeletetest-mpzqn, resource: bindings, ignored listing per whitelist +Jun 18 07:43:48.819: INFO: namespace e2e-tests-nsdeletetest-mpzqn deletion completed in 8.971716502s + +• [SLOW TEST:27.259 seconds] +[sig-api-machinery] Namespaces [Serial] +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should ensure that all services are removed when a namespace is deleted [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] Secrets + should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:43:48.820: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-nfnl9 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secret-namespace-5xkpl +STEP: Creating secret with name secret-test-cfa964db-919c-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume secrets +Jun 18 07:43:50.519: INFO: Waiting up to 5m0s for pod "pod-secrets-cfc3e8cd-919c-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-secrets-nfnl9" to be "success or failure" +Jun 18 07:43:50.527: INFO: Pod "pod-secrets-cfc3e8cd-919c-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 8.263992ms +Jun 18 07:43:52.539: INFO: Pod "pod-secrets-cfc3e8cd-919c-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.020354944s +STEP: Saw pod success +Jun 18 07:43:52.539: INFO: Pod "pod-secrets-cfc3e8cd-919c-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:43:52.542: INFO: Trying to get logs from node node5 pod pod-secrets-cfc3e8cd-919c-11e9-bbf5-0e74dabf3615 container secret-volume-test: +STEP: delete the pod +Jun 18 07:43:52.569: INFO: Waiting for pod pod-secrets-cfc3e8cd-919c-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:43:52.573: INFO: Pod pod-secrets-cfc3e8cd-919c-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:43:52.573: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-nfnl9" for this suite. 
+Jun 18 07:44:00.589: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:44:00.700: INFO: namespace: e2e-tests-secrets-nfnl9, resource: bindings, ignored listing per whitelist +Jun 18 07:44:00.908: INFO: namespace e2e-tests-secrets-nfnl9 deletion completed in 8.331513153s +STEP: Destroying namespace "e2e-tests-secret-namespace-5xkpl" for this suite. +Jun 18 07:44:09.510: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:44:09.611: INFO: namespace: e2e-tests-secret-namespace-5xkpl, resource: bindings, ignored listing per whitelist +Jun 18 07:44:09.838: INFO: namespace e2e-tests-secret-namespace-5xkpl deletion completed in 8.929802078s + +• [SLOW TEST:21.019 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:44:09.838: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy 
to the default service account in e2e-tests-configmap-b6gdv +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name configmap-test-volume-map-dc2d7b0d-919c-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume configMaps +Jun 18 07:44:10.649: INFO: Waiting up to 5m0s for pod "pod-configmaps-dc2e4392-919c-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-configmap-b6gdv" to be "success or failure" +Jun 18 07:44:10.652: INFO: Pod "pod-configmaps-dc2e4392-919c-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.614789ms +Jun 18 07:44:12.655: INFO: Pod "pod-configmaps-dc2e4392-919c-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.0052002s +STEP: Saw pod success +Jun 18 07:44:12.655: INFO: Pod "pod-configmaps-dc2e4392-919c-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:44:12.657: INFO: Trying to get logs from node node5 pod pod-configmaps-dc2e4392-919c-11e9-bbf5-0e74dabf3615 container configmap-volume-test: +STEP: delete the pod +Jun 18 07:44:12.679: INFO: Waiting for pod pod-configmaps-dc2e4392-919c-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:44:12.688: INFO: Pod pod-configmaps-dc2e4392-919c-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:44:12.688: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-b6gdv" for this suite. 
+Jun 18 07:44:21.541: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:44:21.602: INFO: namespace: e2e-tests-configmap-b6gdv, resource: bindings, ignored listing per whitelist +Jun 18 07:44:22.528: INFO: namespace e2e-tests-configmap-b6gdv deletion completed in 9.836678738s + +• [SLOW TEST:12.690 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-api-machinery] Watchers + should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:44:22.528: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename watch +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-watch-wn7kk +STEP: Waiting for a default service account to be provisioned in namespace +[It] should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating a watch on configmaps with label A +STEP: 
creating a watch on configmaps with label B +STEP: creating a watch on configmaps with label A or B +STEP: creating a configmap with label A and ensuring the correct watchers observe the notification +Jun 18 07:44:22.724: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-a,UID:e360a84d-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535768,Generation:0,CreationTimestamp:2019-06-18 07:44:22 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +Jun 18 07:44:22.726: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-a,UID:e360a84d-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535768,Generation:0,CreationTimestamp:2019-06-18 07:44:22 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +STEP: modifying configmap A and ensuring the correct watchers observe the notification +Jun 18 07:44:32.731: INFO: Got : MODIFIED 
&ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-a,UID:e360a84d-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535811,Generation:0,CreationTimestamp:2019-06-18 07:44:22 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},} +Jun 18 07:44:32.731: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-a,UID:e360a84d-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535811,Generation:0,CreationTimestamp:2019-06-18 07:44:22 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},} +STEP: modifying configmap A again and ensuring the correct watchers observe the notification +Jun 18 07:44:42.738: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-a,UID:e360a84d-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535852,Generation:0,CreationTimestamp:2019-06-18 07:44:22 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: 
multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +Jun 18 07:44:42.738: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-a,UID:e360a84d-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535852,Generation:0,CreationTimestamp:2019-06-18 07:44:22 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +STEP: deleting configmap A and ensuring the correct watchers observe the notification +Jun 18 07:44:52.742: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-a,UID:e360a84d-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535894,Generation:0,CreationTimestamp:2019-06-18 07:44:22 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +Jun 18 07:44:52.742: INFO: Got : DELETED 
&ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-a,UID:e360a84d-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535894,Generation:0,CreationTimestamp:2019-06-18 07:44:22 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +STEP: creating a configmap with label B and ensuring the correct watchers observe the notification +Jun 18 07:45:02.747: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-b,UID:fb3b79b1-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535937,Generation:0,CreationTimestamp:2019-06-18 07:45:02 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +Jun 18 07:45:02.748: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-b,UID:fb3b79b1-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535937,Generation:0,CreationTimestamp:2019-06-18 07:45:02 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: 
multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +STEP: deleting configmap B and ensuring the correct watchers observe the notification +Jun 18 07:45:12.754: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-b,UID:fb3b79b1-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535978,Generation:0,CreationTimestamp:2019-06-18 07:45:02 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +Jun 18 07:45:12.754: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-wn7kk,SelfLink:/api/v1/namespaces/e2e-tests-watch-wn7kk/configmaps/e2e-watch-test-configmap-b,UID:fb3b79b1-919c-11e9-8cfd-00163e000a67,ResourceVersion:13535978,Generation:0,CreationTimestamp:2019-06-18 07:45:02 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +[AfterEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:45:22.754: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-watch-wn7kk" for this suite. 
+Jun 18 07:45:29.522: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:45:30.630: INFO: namespace: e2e-tests-watch-wn7kk, resource: bindings, ignored listing per whitelist +Jun 18 07:45:30.672: INFO: namespace e2e-tests-watch-wn7kk deletion completed in 7.913960221s + +• [SLOW TEST:68.144 seconds] +[sig-api-machinery] Watchers +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + Burst scaling should run to completion even with unhealthy pods [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:45:30.673: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename statefulset +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-statefulset-xnvlx +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality 
[StatefulSetBasic] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace e2e-tests-statefulset-xnvlx +[It] Burst scaling should run to completion even with unhealthy pods [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating stateful set ss in namespace e2e-tests-statefulset-xnvlx +STEP: Waiting until all stateful set ss replicas will be running in namespace e2e-tests-statefulset-xnvlx +Jun 18 07:45:31.692: INFO: Found 0 stateful pods, waiting for 1 +Jun 18 07:45:42.590: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod +Jun 18 07:45:42.592: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-xnvlx ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 18 07:45:43.517: INFO: stderr: "" +Jun 18 07:45:43.517: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 18 07:45:43.517: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 18 07:45:43.527: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true +Jun 18 07:45:53.553: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Jun 18 07:45:53.553: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 18 07:45:53.626: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 18 07:45:53.626: INFO: ss-0 node5 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:31 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:43 +0000 UTC 
ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:43 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:31 +0000 UTC }] +Jun 18 07:45:53.626: INFO: +Jun 18 07:45:53.626: INFO: StatefulSet ss has not reached scale 3, at 1 +Jun 18 07:45:54.629: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.994385971s +Jun 18 07:45:55.632: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.991527315s +Jun 18 07:45:56.642: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.988078523s +Jun 18 07:45:57.645: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.978123841s +Jun 18 07:45:58.656: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.974764097s +Jun 18 07:45:59.666: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.963927987s +Jun 18 07:46:00.671: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.954525526s +Jun 18 07:46:01.676: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.948925488s +Jun 18 07:46:02.680: INFO: Verifying statefulset ss doesn't scale past 3 for another 944.113438ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace e2e-tests-statefulset-xnvlx +Jun 18 07:46:04.674: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-xnvlx ss-0 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 18 07:46:05.657: INFO: stderr: "" +Jun 18 07:46:05.657: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 18 07:46:05.657: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 18 07:46:05.657: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-xnvlx ss-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 18 07:46:06.632: INFO: stderr: "mv: can't rename '/tmp/index.html': No such file or directory\n" +Jun 18 07:46:06.632: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 18 07:46:06.632: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 18 07:46:06.632: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-xnvlx ss-2 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 18 07:46:07.599: INFO: stderr: "mv: can't rename '/tmp/index.html': No such file or directory\n" +Jun 18 07:46:07.599: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 18 07:46:07.599: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-2: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 18 07:46:07.602: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +Jun 18 07:46:07.602: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +Jun 18 07:46:07.602: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Scale down will not halt with unhealthy stateful pod +Jun 18 07:46:07.604: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-xnvlx ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 18 07:46:07.767: INFO: stderr: "" +Jun 18 07:46:07.767: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 18 07:46:07.767: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 18 
07:46:07.767: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-xnvlx ss-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 18 07:46:08.627: INFO: stderr: "" +Jun 18 07:46:08.627: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 18 07:46:08.627: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 18 07:46:08.627: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-xnvlx ss-2 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 18 07:46:11.516: INFO: stderr: "" +Jun 18 07:46:11.516: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 18 07:46:11.516: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 18 07:46:11.516: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 18 07:46:11.564: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 1 +Jun 18 07:46:21.569: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Jun 18 07:46:21.569: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false +Jun 18 07:46:21.569: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false +Jun 18 07:46:21.581: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 18 07:46:21.581: INFO: ss-0 node5 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:31 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:08 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:08 +0000 UTC ContainersNotReady containers with unready status: [nginx]} 
{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:31 +0000 UTC }] +Jun 18 07:46:21.581: INFO: ss-1 node2 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:09 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:09 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC }] +Jun 18 07:46:21.581: INFO: ss-2 node3 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC }] +Jun 18 07:46:21.581: INFO: +Jun 18 07:46:21.581: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 18 07:46:22.590: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 18 07:46:22.590: INFO: ss-0 node5 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:31 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:08 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:08 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:31 +0000 UTC }] +Jun 18 07:46:22.590: INFO: ss-1 node2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:09 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 
0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:09 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC }] +Jun 18 07:46:22.590: INFO: ss-2 node3 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC }] +Jun 18 07:46:22.590: INFO: +Jun 18 07:46:22.596: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 18 07:46:23.602: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 18 07:46:23.602: INFO: ss-0 node5 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:31 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:08 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:08 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:31 +0000 UTC }] +Jun 18 07:46:23.602: INFO: ss-1 node2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:09 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:09 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC }] +Jun 18 07:46:23.602: INFO: ss-2 node3 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC } {Ready False 0001-01-01 00:00:00 
+0000 UTC 2019-06-18 07:46:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC }] +Jun 18 07:46:23.602: INFO: +Jun 18 07:46:23.602: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 18 07:46:24.607: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 18 07:46:24.607: INFO: ss-1 node2 Pending 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:09 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:09 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC }] +Jun 18 07:46:24.607: INFO: ss-2 node3 Pending 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:46:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:45:53 +0000 UTC }] +Jun 18 07:46:24.607: INFO: +Jun 18 07:46:24.607: INFO: StatefulSet ss has not reached scale 0, at 2 +Jun 18 07:46:26.572: INFO: Verifying statefulset ss doesn't scale past 0 for another 5.967548435s +Jun 18 07:46:27.583: INFO: Verifying statefulset ss doesn't scale past 0 for another 4.002432063s +Jun 18 07:46:28.586: INFO: Verifying statefulset ss doesn't scale past 0 for another 2.991482843s +Jun 18 07:46:29.605: INFO: Verifying statefulset ss doesn't scale past 0 for another 1.988477518s +Jun 18 07:46:30.611: INFO: 
Verifying statefulset ss doesn't scale past 0 for another 969.880431ms +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacee2e-tests-statefulset-xnvlx +Jun 18 07:46:31.615: INFO: Scaling statefulset ss to 0 +Jun 18 07:46:31.624: INFO: Waiting for statefulset status.replicas updated to 0 +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +Jun 18 07:46:31.626: INFO: Deleting all statefulset in ns e2e-tests-statefulset-xnvlx +Jun 18 07:46:31.628: INFO: Scaling statefulset ss to 0 +Jun 18 07:46:31.638: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 18 07:46:31.641: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:46:31.657: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-statefulset-xnvlx" for this suite. 
+Jun 18 07:46:43.679: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:46:44.636: INFO: namespace: e2e-tests-statefulset-xnvlx, resource: bindings, ignored listing per whitelist +Jun 18 07:46:44.638: INFO: namespace e2e-tests-statefulset-xnvlx deletion completed in 12.977240742s + +• [SLOW TEST:73.965 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + Burst scaling should run to completion even with unhealthy pods [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-network] DNS + should provide DNS for the cluster [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] DNS + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:46:44.638: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename dns +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-dns-zx2b9 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for the cluster [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: 
Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default;check="$$(dig +tcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;test -n "$$(getent hosts dns-querier-1.dns-test-service.e2e-tests-dns-zx2b9.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.e2e-tests-dns-zx2b9.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. 
'{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-zx2b9.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default;check="$$(dig +tcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;test -n "$$(getent hosts dns-querier-1.dns-test-service.e2e-tests-dns-zx2b9.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.e2e-tests-dns-zx2b9.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. 
'{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-zx2b9.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Jun 18 07:46:49.681: INFO: DNS probes using e2e-tests-dns-zx2b9/dns-test-387da809-919d-11e9-bbf5-0e74dabf3615 succeeded + +STEP: deleting the pod +[AfterEach] [sig-network] DNS + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:46:50.536: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-dns-zx2b9" for this suite. +Jun 18 07:46:58.562: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:46:58.691: INFO: namespace: e2e-tests-dns-zx2b9, resource: bindings, ignored listing per whitelist +Jun 18 07:46:59.550: INFO: namespace e2e-tests-dns-zx2b9 deletion completed in 9.00774744s + +• [SLOW TEST:14.912 seconds] +[sig-network] DNS +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + should provide DNS for the cluster [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[k8s.io] Docker Containers + should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Docker Containers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:46:59.550: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename containers +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-containers-p7982 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test override arguments +Jun 18 07:47:00.517: INFO: Waiting up to 5m0s for pod "client-containers-416de30e-919d-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-containers-p7982" to be "success or failure" +Jun 18 07:47:00.520: INFO: Pod "client-containers-416de30e-919d-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.614297ms +Jun 18 07:47:02.532: INFO: Pod "client-containers-416de30e-919d-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.014331845s +STEP: Saw pod success +Jun 18 07:47:02.532: INFO: Pod "client-containers-416de30e-919d-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:47:02.535: INFO: Trying to get logs from node node5 pod client-containers-416de30e-919d-11e9-bbf5-0e74dabf3615 container test-container: +STEP: delete the pod +Jun 18 07:47:02.585: INFO: Waiting for pod client-containers-416de30e-919d-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:47:02.589: INFO: Pod client-containers-416de30e-919d-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [k8s.io] Docker Containers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:47:02.589: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-containers-p7982" for this suite. +Jun 18 07:47:12.606: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:47:12.637: INFO: namespace: e2e-tests-containers-p7982, resource: bindings, ignored listing per whitelist +Jun 18 07:47:12.918: INFO: namespace e2e-tests-containers-p7982 deletion completed in 10.323785672s + +• [SLOW TEST:13.368 seconds] +[k8s.io] Docker Containers +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Update Demo + should do a rolling update of a replication controller [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:47:12.918: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-llkvd +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Update Demo + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:295 +[It] should do a rolling update of a replication controller [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the initial replication controller +Jun 18 07:47:13.694: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:14.661: INFO: stderr: "" +Jun 18 07:47:14.661: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. 
+Jun 18 07:47:14.661: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:15.526: INFO: stderr: "" +Jun 18 07:47:15.526: INFO: stdout: "update-demo-nautilus-vdn86 update-demo-nautilus-w8wch " +Jun 18 07:47:15.526: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-vdn86 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:15.609: INFO: stderr: "" +Jun 18 07:47:15.609: INFO: stdout: "" +Jun 18 07:47:15.609: INFO: update-demo-nautilus-vdn86 is created but not running +Jun 18 07:47:20.609: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:20.696: INFO: stderr: "" +Jun 18 07:47:20.696: INFO: stdout: "update-demo-nautilus-vdn86 update-demo-nautilus-w8wch " +Jun 18 07:47:20.697: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-vdn86 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:20.778: INFO: stderr: "" +Jun 18 07:47:20.778: INFO: stdout: "true" +Jun 18 07:47:20.778: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-vdn86 -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:20.863: INFO: stderr: "" +Jun 18 07:47:20.863: INFO: stdout: "reg.kpaas.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 18 07:47:20.863: INFO: validating pod update-demo-nautilus-vdn86 +Jun 18 07:47:20.866: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 18 07:47:20.866: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 18 07:47:20.866: INFO: update-demo-nautilus-vdn86 is verified up and running +Jun 18 07:47:20.866: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-w8wch -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:20.946: INFO: stderr: "" +Jun 18 07:47:20.946: INFO: stdout: "true" +Jun 18 07:47:20.946: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-w8wch -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:21.026: INFO: stderr: "" +Jun 18 07:47:21.026: INFO: stdout: "reg.kpaas.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 18 07:47:21.026: INFO: validating pod update-demo-nautilus-w8wch +Jun 18 07:47:21.029: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 18 07:47:21.029: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . 
+Jun 18 07:47:21.029: INFO: update-demo-nautilus-w8wch is verified up and running +STEP: rolling-update to new replication controller +Jun 18 07:47:21.031: INFO: scanned /root for discovery docs: +Jun 18 07:47:21.031: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 rolling-update update-demo-nautilus --update-period=1s -f - --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:43.697: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n" +Jun 18 07:47:43.697: INFO: stdout: "Created update-demo-kitten\nScaling up update-demo-kitten from 0 to 2, scaling down update-demo-nautilus from 2 to 0 (keep 2 pods available, don't exceed 3 pods)\nScaling update-demo-kitten up to 1\nScaling update-demo-nautilus down to 1\nScaling update-demo-kitten up to 2\nScaling update-demo-nautilus down to 0\nUpdate succeeded. Deleting old controller: update-demo-nautilus\nRenaming update-demo-kitten to update-demo-nautilus\nreplicationcontroller/update-demo-nautilus rolling updated\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Jun 18 07:47:43.697: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:43.777: INFO: stderr: "" +Jun 18 07:47:43.777: INFO: stdout: "update-demo-kitten-lv6b5 update-demo-kitten-xtxnj " +Jun 18 07:47:43.777: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-kitten-lv6b5 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:43.854: INFO: stderr: "" +Jun 18 07:47:43.854: INFO: stdout: "true" +Jun 18 07:47:43.854: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-kitten-lv6b5 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:43.932: INFO: stderr: "" +Jun 18 07:47:43.932: INFO: stdout: "reg.kpaas.io/kubernetes-e2e-test-images/kitten:1.0" +Jun 18 07:47:43.932: INFO: validating pod update-demo-kitten-lv6b5 +Jun 18 07:47:43.936: INFO: got data: { + "image": "kitten.jpg" +} + +Jun 18 07:47:43.936: INFO: Unmarshalled json jpg/img => {kitten.jpg} , expecting kitten.jpg . +Jun 18 07:47:43.936: INFO: update-demo-kitten-lv6b5 is verified up and running +Jun 18 07:47:43.936: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-kitten-xtxnj -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:44.010: INFO: stderr: "" +Jun 18 07:47:44.010: INFO: stdout: "true" +Jun 18 07:47:44.010: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-kitten-xtxnj -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-llkvd' +Jun 18 07:47:44.094: INFO: stderr: "" +Jun 18 07:47:44.094: INFO: stdout: "reg.kpaas.io/kubernetes-e2e-test-images/kitten:1.0" +Jun 18 07:47:44.094: INFO: validating pod update-demo-kitten-xtxnj +Jun 18 07:47:44.098: INFO: got data: { + "image": "kitten.jpg" +} + +Jun 18 07:47:44.098: INFO: Unmarshalled json jpg/img => {kitten.jpg} , expecting kitten.jpg . +Jun 18 07:47:44.098: INFO: update-demo-kitten-xtxnj is verified up and running +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:47:44.098: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-llkvd" for this suite. +Jun 18 07:48:12.112: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:48:12.671: INFO: namespace: e2e-tests-kubectl-llkvd, resource: bindings, ignored listing per whitelist +Jun 18 07:48:13.514: INFO: namespace e2e-tests-kubectl-llkvd deletion completed in 29.412517876s + +• [SLOW TEST:60.597 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Update Demo + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should do a rolling update of a replication controller [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume [NodeConformance] 
[Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:48:13.515: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-hd94w +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name configmap-test-volume-6d12e2e5-919d-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume configMaps +Jun 18 07:48:13.742: INFO: Waiting up to 5m0s for pod "pod-configmaps-6d134190-919d-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-configmap-hd94w" to be "success or failure" +Jun 18 07:48:13.745: INFO: Pod "pod-configmaps-6d134190-919d-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 3.597953ms +Jun 18 07:48:15.750: INFO: Pod "pod-configmaps-6d134190-919d-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.008430653s +Jun 18 07:48:17.762: INFO: Pod "pod-configmaps-6d134190-919d-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.020214286s +STEP: Saw pod success +Jun 18 07:48:17.762: INFO: Pod "pod-configmaps-6d134190-919d-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:48:17.764: INFO: Trying to get logs from node node5 pod pod-configmaps-6d134190-919d-11e9-bbf5-0e74dabf3615 container configmap-volume-test: +STEP: delete the pod +Jun 18 07:48:17.778: INFO: Waiting for pod pod-configmaps-6d134190-919d-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:48:17.780: INFO: Pod pod-configmaps-6d134190-919d-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:48:17.780: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-hd94w" for this suite. +Jun 18 07:48:27.800: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:48:27.875: INFO: namespace: e2e-tests-configmap-hd94w, resource: bindings, ignored listing per whitelist +Jun 18 07:48:28.112: INFO: namespace e2e-tests-configmap-hd94w deletion completed in 10.328202743s + +• [SLOW TEST:14.598 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-scheduling] SchedulerPredicates [Serial] + validates that NodeSelector is respected if matching [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:48:28.113: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename sched-pred +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-sched-pred-zp7ss +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79 +Jun 18 07:48:28.645: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Jun 18 07:48:28.652: INFO: Waiting for terminating namespaces to be deleted... 
+Jun 18 07:48:28.654: INFO: +Logging pods the kubelet thinks is on node node1 before test +Jun 18 07:48:28.664: INFO: qce-postgres-stolon-keeper-1 from qce started at 2019-05-14 09:40:52 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container stolon ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: alert-dispatcher-58d448f9c9-t5npr from kube-system started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container alert-dispatcher ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: qce-mongo-deploy-65f555f54f-2td5v from qce started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container qce-mongo ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: kube-proxy-4kq5g from kube-system started at 2019-05-14 05:39:01 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container kube-proxy ready: true, restart count 2 +Jun 18 07:48:28.664: INFO: calico-node-87wc8 from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container calico-node ready: true, restart count 2 +Jun 18 07:48:28.664: INFO: Container install-cni ready: true, restart count 2 +Jun 18 07:48:28.664: INFO: csi-cephfs-ceph-csi-cephfs-provisioner-0 from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container csi-cephfsplugin ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: Container csi-provisioner ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: alertmanager-prometheus-operator-alertmanager-1 from kube-system started at 2019-06-15 05:36:24 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container alertmanager ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: Container config-reloader ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-2smn4 from 
default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container csi-cephfsplugin ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: Container driver-registrar ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: logkit-poc-dk8x2 from kube-system started at 2019-05-17 03:17:51 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container logkit-poc ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: qce-postgres-stolon-sentinel-b6bcb4448-gch5x from qce started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container stolon ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: mongorsdata-operator-54b67c6cc5-fh4r4 from qiniu-mongors started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container mongors-operator ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: prometheus-operator-prometheus-node-exporter-jd657 from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container node-exporter ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: qce-etcd-5665b647b-cjlnd from qce started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container qce-etcd-etcd ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-r97x2 from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container csi-rbdplugin ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: Container driver-registrar ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: qce-authzhook-deploy-75cbd8bc4b-wd28x from qce started at 2019-05-14 10:16:10 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container qce-authzhook ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: 
prometheus-prometheus-operator-prometheus-0 from kube-system started at 2019-06-15 09:23:36 +0000 UTC (3 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container prometheus ready: true, restart count 1 +Jun 18 07:48:28.664: INFO: Container prometheus-config-reloader ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: Container rules-configmap-reloader ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: redisdata-operator-cdd96dd96-mxcw6 from qiniu-redis started at 2019-06-04 11:39:27 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container redis-operator ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: csirbd-demo-pod from default started at 2019-05-14 08:50:23 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.664: INFO: Container web-server ready: true, restart count 0 +Jun 18 07:48:28.664: INFO: +Logging pods the kubelet thinks is on node node2 before test +Jun 18 07:48:28.679: INFO: logkit-poc-cgpj8 from kube-system started at 2019-05-17 03:17:51 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container logkit-poc ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: kube-proxy-hm6bg from kube-system started at 2019-05-14 05:39:31 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container kube-proxy ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: alert-controller-568fb6794d-f9vhm from kube-system started at 2019-06-14 01:20:22 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container alert-controller ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: qce-postgres-stolon-keeper-0 from qce started at 2019-06-14 23:07:51 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container stolon ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: kibana-58f596b5d4-gprzs from kube-system started at 2019-06-09 10:42:30 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container kibana ready: 
true, restart count 0 +Jun 18 07:48:28.679: INFO: redis-operator-b7597fc6c-fhsq9 from qiniu-redis started at 2019-06-06 05:55:00 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container redis-operator ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: csi-rbd-ceph-csi-rbd-provisioner-0 from default started at 2019-06-15 04:42:56 +0000 UTC (3 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container csi-provisioner ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: Container csi-rbdplugin ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: Container csi-snapshotter ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: calico-kube-controllers-5ffbcb76cf-km64s from kube-system started at 2019-06-06 06:34:55 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container calico-kube-controllers ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: prometheus-operator-prometheus-node-exporter-ctlvb from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container node-exporter ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: qce-clair-6f69f7554d-2hpxb from qce started at 2019-06-08 07:24:41 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container clair ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: csi-cephfs-ceph-csi-cephfs-attacher-0 from default started at 2019-05-14 08:47:42 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container csi-cephfsplugin-attacher ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: rabbitmq-operator-845b85b447-qx5nm from qiniu-rabbitmq started at 2019-06-15 05:32:51 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container rabbitmq-operator ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: calico-node-vfj4h from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded) +Jun 18 
07:48:28.679: INFO: Container calico-node ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: Container install-cni ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-c2hjw from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container csi-cephfsplugin ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: Container driver-registrar ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: alert-apiserver-5f887ff458-dcdcn from kube-system started at 2019-06-13 06:50:57 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container alert-apiserver ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-mncbd from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.679: INFO: Container csi-rbdplugin ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: Container driver-registrar ready: true, restart count 0 +Jun 18 07:48:28.679: INFO: +Logging pods the kubelet thinks is on node node3 before test +Jun 18 07:48:28.689: INFO: prometheus-operator-prometheus-node-exporter-84pmd from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container node-exporter ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: qce-jenkins-0 from qce started at 2019-06-16 18:40:16 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container qce-jenkins ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-gxvpm from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container csi-rbdplugin ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: Container driver-registrar ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: logkit-poc-znzg2 from kube-system started at 2019-06-18 06:27:20 +0000 UTC (1 
container statuses recorded) +Jun 18 07:48:28.689: INFO: Container logkit-poc ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: kube-proxy-tc77p from kube-system started at 2019-05-14 05:38:50 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container kube-proxy ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: calico-node-mzvzv from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container calico-node ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: Container install-cni ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: mongors-operator-65df599b-wjs4w from qiniu-mongors started at 2019-06-04 11:39:27 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container mongors-operator ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: qce-portal-deploy-6d799f79df-5lsgc from qce started at 2019-06-17 04:26:28 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container qce-portal ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: prometheus-operator-grafana-86b99c77dd-cmbdv from kube-system started at 2019-05-16 08:39:36 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container grafana ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: Container grafana-sc-dashboard ready: true, restart count 39 +Jun 18 07:48:28.689: INFO: alert-apiserver-etcd-6d744f7648-llfwf from kube-system started at 2019-06-13 06:49:42 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container alert-apiserver-etcd ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: tiller-deploy-555696dfc8-gvznf from kube-system started at 2019-05-14 08:33:12 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container tiller ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-tnz48 from default started at 2019-05-14 08:47:42 +0000 UTC (2 
container statuses recorded) +Jun 18 07:48:28.689: INFO: Container csi-cephfsplugin ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: Container driver-registrar ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: qce-postgres-stolon-sentinel-b6bcb4448-c4nmj from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container stolon ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: qce-postgres-stolon-proxy-78b9bc58d8-pg92h from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container stolon ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: prometheus-operator-prometheus-blackbox-exporter-5d4cbbf54vzmk6 from kube-system started at 2019-05-16 08:39:36 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container blackbox-exporter ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: Container configmap-reload ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: prometheus-operator-kube-state-metrics-969f69894-p5bbm from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.689: INFO: Container kube-state-metrics ready: true, restart count 0 +Jun 18 07:48:28.689: INFO: +Logging pods the kubelet thinks is on node node4 before test +Jun 18 07:48:28.697: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-7cg42 from default started at 2019-06-16 19:50:32 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.697: INFO: Container csi-cephfsplugin ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: Container driver-registrar ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: mysqldata-operator-6f447687b6-qdkt8 from qiniu-mysql started at 2019-06-18 03:17:07 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.697: INFO: Container mysql-operator ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: mysql-operator-v2-645fcc7f6c-l9dtm from qiniu-mysql 
started at 2019-06-18 03:19:36 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.697: INFO: Container mysql-operator ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: prometheus-operator-prometheus-node-exporter-f2zgm from kube-system started at 2019-06-16 19:39:12 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.697: INFO: Container node-exporter ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: kube-proxy-2vsgc from kube-system started at 2019-06-16 19:50:32 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.697: INFO: Container kube-proxy ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: elasticsearch-c5cc84d5f-ctdmq from kube-system started at 2019-06-18 06:26:40 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.697: INFO: Container elasticsearch ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: Container es-rotate ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: calico-node-fhsvk from kube-system started at 2019-06-16 19:53:03 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.697: INFO: Container calico-node ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: Container install-cni ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-q2jtp from default started at 2019-06-16 19:51:06 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.697: INFO: Container csi-rbdplugin ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: Container driver-registrar ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: kirk-apiserver-doc-6b5f8c7dd8-lm2pv from qce started at 2019-06-18 05:42:55 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.697: INFO: Container kirk-apiserver-doc ready: true, restart count 0 +Jun 18 07:48:28.697: INFO: logkit-poc-7shgm from kube-system started at 2019-06-16 19:36:14 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.697: INFO: Container logkit-poc ready: true, restart count 0 +Jun 18 
07:48:28.697: INFO: +Logging pods the kubelet thinks is on node node5 before test +Jun 18 07:48:28.705: INFO: qce-postgres-stolon-proxy-78b9bc58d8-8pp2x from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container stolon ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: onetimeurl-controller-745fc87d5d-g58jg from qce started at 2019-05-14 10:16:10 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container onetimeurl-controller ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: logkit-poc-5z5cm from kube-system started at 2019-05-17 03:17:51 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container logkit-poc ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-42fl8 from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container csi-rbdplugin ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: Container driver-registrar ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: csi-rbd-ceph-csi-rbd-attacher-0 from default started at 2019-05-14 08:47:33 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container csi-rbdplugin-attacher ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-18 07:13:06 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container kube-sonobuoy ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: alertmanager-prometheus-operator-alertmanager-0 from kube-system started at 2019-05-16 08:39:44 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container alertmanager ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: Container config-reloader ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: sonobuoy-e2e-job-2b96015867f64622 from heptio-sonobuoy started at 2019-06-18 07:13:12 +0000 UTC (2 container statuses 
recorded) +Jun 18 07:48:28.705: INFO: Container e2e ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: Container sonobuoy-worker ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: qce-user-manual-deploy-867778f667-dcl87 from qce started at 2019-05-27 12:26:46 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container qce-user-manual ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: prometheus-operator-prometheus-node-exporter-9g6lb from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container node-exporter ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: prometheus-prometheus-operator-prometheus-1 from kube-system started at 2019-06-13 11:42:12 +0000 UTC (3 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container prometheus ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: Container prometheus-config-reloader ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: Container rules-configmap-reloader ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: alert-dispatcher-58d448f9c9-4mxgj from kube-system started at 2019-06-15 12:19:08 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container alert-dispatcher ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: kube-proxy-lqpj7 from kube-system started at 2019-05-14 05:38:48 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container kube-proxy ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: qce-postgres-stolon-sentinel-b6bcb4448-jbrkl from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container stolon ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: prometheus-operator-operator-654b9d4648-lflhd from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container prometheus-operator ready: true, restart count 
0 +Jun 18 07:48:28.705: INFO: calico-node-fmzrt from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container calico-node ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: Container install-cni ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-jfmbb from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded) +Jun 18 07:48:28.705: INFO: Container csi-cephfsplugin ready: true, restart count 0 +Jun 18 07:48:28.705: INFO: Container driver-registrar ready: true, restart count 0 +[It] validates that NodeSelector is respected if matching [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Trying to launch a pod without a label to get a node which can launch it. +STEP: Explicitly delete pod here to free the resource it takes. +STEP: Trying to apply a random label on the found node. +STEP: verifying the node has the label kubernetes.io/e2e-77331eef-919d-11e9-bbf5-0e74dabf3615 42 +STEP: Trying to relaunch the pod, now with labels. +STEP: removing the label kubernetes.io/e2e-77331eef-919d-11e9-bbf5-0e74dabf3615 off the node node5 +STEP: verifying the node doesn't have the label kubernetes.io/e2e-77331eef-919d-11e9-bbf5-0e74dabf3615 +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:48:32.755: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-sched-pred-zp7ss" for this suite. 
+Jun 18 07:48:44.767: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:48:44.866: INFO: namespace: e2e-tests-sched-pred-zp7ss, resource: bindings, ignored listing per whitelist +Jun 18 07:48:45.079: INFO: namespace e2e-tests-sched-pred-zp7ss deletion completed in 12.32087471s +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70 + +• [SLOW TEST:16.966 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22 + validates that NodeSelector is respected if matching [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl patch + should add annotations for pods in rc [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:48:45.079: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-kvw6r +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should add annotations for pods in rc [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating Redis RC +Jun 18 07:48:45.687: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-kvw6r' +Jun 18 07:48:46.635: INFO: stderr: "" +Jun 18 07:48:46.635: INFO: stdout: "replicationcontroller/redis-master created\n" +STEP: Waiting for Redis master to start. +Jun 18 07:48:47.657: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:48:47.657: INFO: Found 0 / 1 +Jun 18 07:48:48.637: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:48:48.637: INFO: Found 1 / 1 +Jun 18 07:48:48.637: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +STEP: patching all pods +Jun 18 07:48:48.639: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:48:48.639: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +Jun 18 07:48:48.639: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 patch pod redis-master-djgpm --namespace=e2e-tests-kubectl-kvw6r -p {"metadata":{"annotations":{"x":"y"}}}' +Jun 18 07:48:48.739: INFO: stderr: "" +Jun 18 07:48:48.739: INFO: stdout: "pod/redis-master-djgpm patched\n" +STEP: checking annotations +Jun 18 07:48:48.742: INFO: Selector matched 1 pods for map[app:redis] +Jun 18 07:48:48.742: INFO: ForEach: Found 1 pods from the filter. Now looping through them. 
+[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:48:48.742: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-kvw6r" for this suite. +Jun 18 07:49:12.759: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:49:13.563: INFO: namespace: e2e-tests-kubectl-kvw6r, resource: bindings, ignored listing per whitelist +Jun 18 07:49:13.640: INFO: namespace e2e-tests-kubectl-kvw6r deletion completed in 24.893673878s + +• [SLOW TEST:28.561 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl patch + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should add annotations for pods in rc [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSS +------------------------------ +[k8s.io] Container Runtime blackbox test when starting a container that exits + should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Container Runtime + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:49:13.641: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, 
basename container-runtime +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-runtime-4w97t +STEP: Waiting for a default service account to be provisioned in namespace +[It] should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpa': should get the expected 'State' +STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpof': should get the expected 'State' +STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpn': should get the expected 'State' +STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance] +[AfterEach] [k8s.io] Container Runtime + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:49:41.668: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-runtime-4w97t" for this suite. 
+Jun 18 07:49:49.683: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:49:50.653: INFO: namespace: e2e-tests-container-runtime-4w97t, resource: bindings, ignored listing per whitelist +Jun 18 07:49:51.518: INFO: namespace e2e-tests-container-runtime-4w97t deletion completed in 9.847651924s + +• [SLOW TEST:37.878 seconds] +[k8s.io] Container Runtime +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + blackbox test + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:37 + when starting a container that exits + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:38 + should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSS +------------------------------ +[sig-apps] Deployment + deployment should support proportional scaling [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:49:51.519: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename deployment +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-deployment-bjvpd +STEP: Waiting for a default service account to be provisioned in 
namespace +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65 +[It] deployment should support proportional scaling [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +Jun 18 07:49:51.704: INFO: Creating deployment "nginx-deployment" +Jun 18 07:49:51.710: INFO: Waiting for observed generation 1 +Jun 18 07:49:54.614: INFO: Waiting for all required pods to come up +Jun 18 07:49:54.632: INFO: Pod name nginx: Found 10 pods out of 10 +STEP: ensuring each pod is running +Jun 18 07:49:56.652: INFO: Waiting for deployment "nginx-deployment" to complete +Jun 18 07:49:56.660: INFO: Updating deployment "nginx-deployment" with a non-existent image +Jun 18 07:49:56.665: INFO: Updating deployment nginx-deployment +Jun 18 07:49:56.665: INFO: Waiting for observed generation 2 +Jun 18 07:49:58.674: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8 +Jun 18 07:49:58.680: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8 +Jun 18 07:49:58.684: INFO: Waiting for the first rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas +Jun 18 07:49:58.690: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0 +Jun 18 07:49:58.690: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5 +Jun 18 07:49:58.691: INFO: Waiting for the second rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas +Jun 18 07:49:58.694: INFO: Verifying that deployment "nginx-deployment" has minimum required number of available replicas +Jun 18 07:49:58.694: INFO: Scaling up the deployment "nginx-deployment" from 10 to 30 +Jun 18 07:49:58.698: INFO: Updating deployment nginx-deployment +Jun 18 
07:49:58.698: INFO: Waiting for the replicasets of deployment "nginx-deployment" to have desired number of replicas +Jun 18 07:49:58.701: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20 +Jun 18 07:49:58.704: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13 +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59 +Jun 18 07:49:58.732: INFO: Deployment "nginx-deployment": +&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment,GenerateName:,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-bjvpd/deployments/nginx-deployment,UID:a7776ac2-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538156,Generation:3,CreationTimestamp:2019-06-18 07:49:51 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*30,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:3,Replicas:13,UpdatedReplicas:5,AvailableReplicas:8,UnavailableReplicas:5,Conditions:[{Progressing True 2019-06-18 07:49:56 +0000 UTC 2019-06-18 07:49:51 +0000 UTC ReplicaSetUpdated ReplicaSet "nginx-deployment-65bbdb5f8" is progressing.} {Available False 2019-06-18 07:49:58 +0000 UTC 2019-06-18 07:49:58 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.}],ReadyReplicas:8,CollisionCount:nil,},} + +Jun 18 07:49:58.749: INFO: New ReplicaSet "nginx-deployment-65bbdb5f8" of Deployment "nginx-deployment": +&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8,GenerateName:,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-bjvpd/replicasets/nginx-deployment-65bbdb5f8,UID:aa6c884a-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538148,Generation:3,CreationTimestamp:2019-06-18 07:49:56 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 
65bbdb5f8,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment nginx-deployment a7776ac2-919d-11e9-8cfd-00163e000a67 0xc0025654b7 0xc0025654b8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*13,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:5,FullyLabeledReplicas:5,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +Jun 18 07:49:58.749: INFO: 
All old ReplicaSets of Deployment "nginx-deployment": +Jun 18 07:49:58.749: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965,GenerateName:,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-bjvpd/replicasets/nginx-deployment-555b55d965,UID:a7792963-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538177,Generation:3,CreationTimestamp:2019-06-18 07:49:51 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment nginx-deployment a7776ac2-919d-11e9-8cfd-00163e000a67 0xc0025653f7 0xc0025653f8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*20,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:8,FullyLabeledReplicas:8,ObservedGeneration:3,ReadyReplicas:8,AvailableReplicas:8,Conditions:[],},} +Jun 18 07:49:58.761: INFO: Pod "nginx-deployment-555b55d965-29zj8" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-29zj8,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-29zj8,UID:aba3ae22-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538182,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b98807 0xc001b98808}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} 
[{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b98880} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b988a0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.155,PodIP:,StartTime:2019-06-18 07:49:58 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.761: INFO: Pod "nginx-deployment-555b55d965-2pxt9" is available: 
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-2pxt9,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-2pxt9,UID:a7f48513-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538048,Generation:0,CreationTimestamp:2019-06-18 07:49:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b98957 0xc001b98958}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b989d0} {node.kubernetes.io/unreachable Exists NoExecute 
0xc001b989f0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.155,PodIP:171.171.33.178,StartTime:2019-06-18 07:49:52 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-18 07:49:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://b45a6ce963285f387631ac84c313eb7bfa37c25fcc4efd460afe834dcbc96f10}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.761: INFO: Pod "nginx-deployment-555b55d965-45wb8" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-45wb8,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-45wb8,UID:a82391c7-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538036,Generation:0,CreationTimestamp:2019-06-18 07:49:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b98ab7 0xc001b98ab8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil 
nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b98b30} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b98b50}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:55 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:55 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.152,PodIP:171.171.104.37,StartTime:2019-06-18 07:49:52 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-18 07:49:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://0b98402f7f8169035e19de30c45f7355ace3141e473e823a6df2a362551bb956}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.761: 
INFO: Pod "nginx-deployment-555b55d965-5tnqc" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-5tnqc,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-5tnqc,UID:aba6de87-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538190,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b98c17 0xc001b98c18}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b98c90} {node.kubernetes.io/unreachable Exists NoExecute 
0xc001b98cb0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.761: INFO: Pod "nginx-deployment-555b55d965-5zb4f" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-5zb4f,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-5zb4f,UID:aba6bc34-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538193,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b98d27 0xc001b98d28}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b98da0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b98dc0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.761: INFO: Pod "nginx-deployment-555b55d965-6lnsp" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-6lnsp,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-6lnsp,UID:aba3c464-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538162,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b98e37 0xc001b98e38}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil 
SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b98eb0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b98ed0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.761: INFO: Pod "nginx-deployment-555b55d965-72zbr" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-72zbr,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-72zbr,UID:aba4b7fe-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538176,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 
UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b98f47 0xc001b98f48}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b98fc0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b98fe0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.761: INFO: Pod "nginx-deployment-555b55d965-9lc6n" is not available: 
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-9lc6n,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-9lc6n,UID:aba6d068-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538189,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b99057 0xc001b99058}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b990d0} {node.kubernetes.io/unreachable Exists NoExecute 
0xc001b990f0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.761: INFO: Pod "nginx-deployment-555b55d965-cmc4p" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-cmc4p,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-cmc4p,UID:aba4c93e-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538167,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b99167 0xc001b99168}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node4,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b991e0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b99200}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.761: INFO: Pod "nginx-deployment-555b55d965-hfgwk" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-hfgwk,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-hfgwk,UID:aba4de70-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538166,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b99277 0xc001b99278}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil 
SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b992f0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b99320}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.762: INFO: Pod "nginx-deployment-555b55d965-hmplf" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-hmplf,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-hmplf,UID:aba338bb-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538183,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 
UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b993a7 0xc001b993a8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b99440} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b99470}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC 
ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.151,PodIP:,StartTime:2019-06-18 07:49:58 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.762: INFO: Pod "nginx-deployment-555b55d965-mvbc7" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-mvbc7,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-mvbc7,UID:a7f5755d-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538053,Generation:0,CreationTimestamp:2019-06-18 07:49:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b99527 0xc001b99528}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b995a0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b995c0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:55 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:55 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.152,PodIP:171.171.104.38,StartTime:2019-06-18 07:49:52 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-18 07:49:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://cf90d595e998061843136e391e11451746f2e9389ee30a1a5162c7b6fd54d20e}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.762: INFO: Pod "nginx-deployment-555b55d965-n8tp4" is available: 
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-n8tp4,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-n8tp4,UID:a823bd89-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538040,Generation:0,CreationTimestamp:2019-06-18 07:49:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b99687 0xc001b99688}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b99710} {node.kubernetes.io/unreachable Exists NoExecute 
0xc001b99730}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.155,PodIP:171.171.33.146,StartTime:2019-06-18 07:49:52 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-18 07:49:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://da0c1506afe09544f1fb5539651f723c024e00ea99d8036768fcd7078e7ff016}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.762: INFO: Pod "nginx-deployment-555b55d965-tv8gc" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-tv8gc,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-tv8gc,UID:a814f8c2-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538052,Generation:0,CreationTimestamp:2019-06-18 07:49:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b99807 0xc001b99808}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil 
nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b998c0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b998e0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.155,PodIP:171.171.33.145,StartTime:2019-06-18 07:49:52 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-18 07:49:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://87afab93282c384cfd88d191f146422a32364b713c41c47225c936dcc294703b}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.762: 
INFO: Pod "nginx-deployment-555b55d965-v5mf7" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-v5mf7,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-v5mf7,UID:aba6e4c7-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538187,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b999a7 0xc001b999a8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node4,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b99a30} {node.kubernetes.io/unreachable Exists NoExecute 
0xc001b99a50}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.762: INFO: Pod "nginx-deployment-555b55d965-vkcp9" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-vkcp9,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-vkcp9,UID:a7f569ab-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538039,Generation:0,CreationTimestamp:2019-06-18 07:49:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b99ac7 0xc001b99ac8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b99b40} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b99b60}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.153,PodIP:171.171.135.6,StartTime:2019-06-18 07:49:52 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-18 07:49:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://32ba0f8a0f79f33d3ca028226af4c61e6375d253537d347c0dff6f96657d0370}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.762: INFO: Pod "nginx-deployment-555b55d965-wvxbx" is available: 
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-wvxbx,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-wvxbx,UID:a814d315-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538037,Generation:0,CreationTimestamp:2019-06-18 07:49:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b99c27 0xc001b99c28}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b99ca0} {node.kubernetes.io/unreachable Exists NoExecute 
0xc001b99cc0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.151,PodIP:171.171.166.142,StartTime:2019-06-18 07:49:52 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-18 07:49:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://66230b327f213d78f184f05a889c4f6e54a4e6332cb0cb5527db8f3eee01a02c}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.762: INFO: Pod "nginx-deployment-555b55d965-xzmdt" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-xzmdt,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-xzmdt,UID:a814cb85-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538047,Generation:0,CreationTimestamp:2019-06-18 07:49:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b99d87 0xc001b99d88}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil 
nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b99e00} {node.kubernetes.io/unreachable Exists NoExecute 0xc001b99e20}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:55 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:55 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:52 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.152,PodIP:171.171.104.23,StartTime:2019-06-18 07:49:52 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-18 07:49:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://f03e660d554dab1a4c2d18a7b16fd77e912a2f7d8262b24db037da4cdaa1781f}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.762: 
INFO: Pod "nginx-deployment-555b55d965-zms6z" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-zms6z,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-zms6z,UID:aba4aab2-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538172,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b99ee7 0xc001b99ee8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001b99f60} {node.kubernetes.io/unreachable Exists NoExecute 
0xc001b99f80}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.763: INFO: Pod "nginx-deployment-555b55d965-zsztl" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-zsztl,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-555b55d965-zsztl,UID:aba6d782-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538192,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 a7792963-919d-11e9-8cfd-00163e000a67 0xc001b99ff7 0xc001b99ff8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016ae070} {node.kubernetes.io/unreachable Exists NoExecute 0xc0016ae090}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.763: INFO: Pod "nginx-deployment-65bbdb5f8-25sjs" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-25sjs,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-25sjs,UID:aba88ae6-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538195,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016ae107 0xc0016ae108}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil 
SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node4,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016ae180} {node.kubernetes.io/unreachable Exists NoExecute 0xc0016ae1a0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.763: INFO: Pod "nginx-deployment-65bbdb5f8-2zq97" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-2zq97,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-2zq97,UID:aba8b636-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538199,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 
UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016ae217 0xc0016ae218}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016ae290} {node.kubernetes.io/unreachable Exists NoExecute 0xc0016ae2b0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.763: INFO: Pod "nginx-deployment-65bbdb5f8-8d2wl" is not available: 
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-8d2wl,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-8d2wl,UID:aba8acc6-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538197,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016ae337 0xc0016ae338}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016ae3b0} {node.kubernetes.io/unreachable Exists NoExecute 
0xc0016ae3d0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.763: INFO: Pod "nginx-deployment-65bbdb5f8-bw9xw" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-bw9xw,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-bw9xw,UID:aa6ddbbd-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538110,Generation:0,CreationTimestamp:2019-06-18 07:49:56 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016ae447 0xc0016ae448}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016ae4c0} {node.kubernetes.io/unreachable Exists NoExecute 0xc0016ae4e0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.152,PodIP:,StartTime:2019-06-18 07:49:56 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.763: INFO: Pod "nginx-deployment-65bbdb5f8-fqbbc" is not available: 
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-fqbbc,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-fqbbc,UID:aa73faa2-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538107,Generation:0,CreationTimestamp:2019-06-18 07:49:56 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016ae5a7 0xc0016ae5a8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node4,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016ae620} {node.kubernetes.io/unreachable Exists NoExecute 
0xc0016ae640}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.154,PodIP:,StartTime:2019-06-18 07:49:56 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.763: INFO: Pod "nginx-deployment-65bbdb5f8-jkxqw" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-jkxqw,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-jkxqw,UID:aba6c878-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538188,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016ae717 0xc0016ae718}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil 
nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016ae790} {node.kubernetes.io/unreachable Exists NoExecute 0xc0016ae7b0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.764: INFO: Pod "nginx-deployment-65bbdb5f8-mcbtn" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-mcbtn,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-mcbtn,UID:aa6cffef-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538087,Generation:0,CreationTimestamp:2019-06-18 07:49:56 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet 
nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016ae827 0xc0016ae828}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016ae8a0} {node.kubernetes.io/unreachable Exists NoExecute 0xc0016ae8c0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.155,PodIP:,StartTime:2019-06-18 07:49:56 +0000 
UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.764: INFO: Pod "nginx-deployment-65bbdb5f8-mjfws" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-mjfws,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-mjfws,UID:aba46325-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538164,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016ae987 0xc0016ae988}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016aea00} {node.kubernetes.io/unreachable Exists NoExecute 0xc0016aea20}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.764: INFO: Pod "nginx-deployment-65bbdb5f8-p2xwh" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-p2xwh,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-p2xwh,UID:aba8bd63-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538196,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016aea97 0xc0016aea98}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil 
SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016aeb10} {node.kubernetes.io/unreachable Exists NoExecute 0xc0016aeb30}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.764: INFO: Pod "nginx-deployment-65bbdb5f8-s4v95" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-s4v95,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-s4v95,UID:aa6e082f-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538093,Generation:0,CreationTimestamp:2019-06-18 07:49:56 +0000 
UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016aeba7 0xc0016aeba8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016aec20} {node.kubernetes.io/unreachable Exists NoExecute 0xc0016aec40}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC ContainersNotReady containers with unready 
status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.151,PodIP:,StartTime:2019-06-18 07:49:56 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.764: INFO: Pod "nginx-deployment-65bbdb5f8-schhr" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-schhr,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-schhr,UID:aba69529-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538191,Generation:0,CreationTimestamp:2019-06-18 07:49:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016aed07 0xc0016aed08}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016aeec0} {node.kubernetes.io/unreachable Exists NoExecute 0xc0016aeee0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:58 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 18 07:49:58.764: INFO: Pod "nginx-deployment-65bbdb5f8-tgmng" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-tgmng,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-bjvpd,SelfLink:/api/v1/namespaces/e2e-tests-deployment-bjvpd/pods/nginx-deployment-65bbdb5f8-tgmng,UID:aa7280b9-919d-11e9-8cfd-00163e000a67,ResourceVersion:13538108,Generation:0,CreationTimestamp:2019-06-18 07:49:56 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 aa6c884a-919d-11e9-8cfd-00163e000a67 0xc0016aef57 0xc0016aef58}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-879dx {nil nil nil nil nil 
SecretVolumeSource{SecretName:default-token-879dx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-879dx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0016aefd0} {node.kubernetes.io/unreachable Exists NoExecute 0xc0016aeff0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:49:56 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.153,PodIP:,StartTime:2019-06-18 07:49:56 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +[AfterEach] 
[sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:49:58.764: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-deployment-bjvpd" for this suite. +Jun 18 07:50:16.793: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:50:17.532: INFO: namespace: e2e-tests-deployment-bjvpd, resource: bindings, ignored listing per whitelist +Jun 18 07:50:17.610: INFO: namespace e2e-tests-deployment-bjvpd deletion completed in 18.837266337s + +• [SLOW TEST:26.091 seconds] +[sig-apps] Deployment +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + deployment should support proportional scaling [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:50:17.610: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-wfhgf +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support 
(non-root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0644 on tmpfs +Jun 18 07:50:18.636: INFO: Waiting up to 5m0s for pod "pod-b7842eeb-919d-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-wfhgf" to be "success or failure" +Jun 18 07:50:18.638: INFO: Pod "pod-b7842eeb-919d-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022678ms +Jun 18 07:50:20.641: INFO: Pod "pod-b7842eeb-919d-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.004609472s +Jun 18 07:50:22.644: INFO: Pod "pod-b7842eeb-919d-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 4.007792072s +Jun 18 07:50:24.647: INFO: Pod "pod-b7842eeb-919d-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 6.010524396s +Jun 18 07:50:26.650: INFO: Pod "pod-b7842eeb-919d-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 8.014236876s +Jun 18 07:50:28.654: INFO: Pod "pod-b7842eeb-919d-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 10.017867122s +STEP: Saw pod success +Jun 18 07:50:28.654: INFO: Pod "pod-b7842eeb-919d-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:50:28.656: INFO: Trying to get logs from node node5 pod pod-b7842eeb-919d-11e9-bbf5-0e74dabf3615 container test-container: +STEP: delete the pod +Jun 18 07:50:28.668: INFO: Waiting for pod pod-b7842eeb-919d-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:50:28.671: INFO: Pod pod-b7842eeb-919d-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:50:28.671: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-wfhgf" for this suite. +Jun 18 07:50:36.684: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:50:36.764: INFO: namespace: e2e-tests-emptydir-wfhgf, resource: bindings, ignored listing per whitelist +Jun 18 07:50:37.576: INFO: namespace e2e-tests-emptydir-wfhgf deletion completed in 8.901489035s + +• [SLOW TEST:19.965 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (non-root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:50:37.576: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-4jmbk +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name secret-test-c360c33d-919d-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume secrets +Jun 18 07:50:38.560: INFO: Waiting up to 5m0s for pod "pod-secrets-c362b545-919d-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-secrets-4jmbk" to be "success or failure" +Jun 18 07:50:38.576: INFO: Pod "pod-secrets-c362b545-919d-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 16.458849ms +Jun 18 07:50:40.579: INFO: Pod "pod-secrets-c362b545-919d-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018945496s +Jun 18 07:50:42.584: INFO: Pod "pod-secrets-c362b545-919d-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.024212446s +STEP: Saw pod success +Jun 18 07:50:42.584: INFO: Pod "pod-secrets-c362b545-919d-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:50:42.587: INFO: Trying to get logs from node node5 pod pod-secrets-c362b545-919d-11e9-bbf5-0e74dabf3615 container secret-volume-test: +STEP: delete the pod +Jun 18 07:50:42.615: INFO: Waiting for pod pod-secrets-c362b545-919d-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:50:42.617: INFO: Pod pod-secrets-c362b545-919d-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:50:42.617: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-4jmbk" for this suite. +Jun 18 07:50:52.631: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:50:55.589: INFO: namespace: e2e-tests-secrets-4jmbk, resource: bindings, ignored listing per whitelist +Jun 18 07:50:55.625: INFO: namespace e2e-tests-secrets-4jmbk deletion completed in 13.005216101s + +• [SLOW TEST:18.049 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[k8s.io] InitContainer [NodeConformance] + should invoke init containers on a RestartNever pod [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:50:55.625: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename init-container +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-init-container-qfkwg +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43 +[It] should invoke init containers on a RestartNever pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +Jun 18 07:50:56.636: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:51:02.542: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-init-container-qfkwg" for this suite. 
+Jun 18 07:51:12.579: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:51:12.663: INFO: namespace: e2e-tests-init-container-qfkwg, resource: bindings, ignored listing per whitelist +Jun 18 07:51:13.529: INFO: namespace e2e-tests-init-container-qfkwg deletion completed in 10.967234417s + +• [SLOW TEST:17.904 seconds] +[k8s.io] InitContainer [NodeConformance] +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should invoke init containers on a RestartNever pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run deployment + should create a deployment from an image [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:51:13.530: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-pmslh +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl run deployment + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1399 +[It] should create a deployment from an image [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 18 07:51:14.514: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --generator=deployment/v1beta1 --namespace=e2e-tests-kubectl-pmslh' +Jun 18 07:51:14.625: INFO: stderr: "kubectl run --generator=deployment/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +Jun 18 07:51:14.625: INFO: stdout: "deployment.extensions/e2e-test-nginx-deployment created\n" +STEP: verifying the deployment e2e-test-nginx-deployment was created +STEP: verifying the pod controlled by deployment e2e-test-nginx-deployment was created +[AfterEach] [k8s.io] Kubectl run deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1404 +Jun 18 07:51:18.634: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete deployment e2e-test-nginx-deployment --namespace=e2e-tests-kubectl-pmslh' +Jun 18 07:51:19.558: INFO: stderr: "" +Jun 18 07:51:19.558: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:51:19.558: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-pmslh" for this suite. 
+Jun 18 07:51:31.583: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:51:31.638: INFO: namespace: e2e-tests-kubectl-pmslh, resource: bindings, ignored listing per whitelist +Jun 18 07:51:32.559: INFO: namespace e2e-tests-kubectl-pmslh deletion completed in 12.997257615s + +• [SLOW TEST:19.029 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl run deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create a deployment from an image [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] InitContainer [NodeConformance] + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:51:32.559: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename init-container +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-init-container-swk5g +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43 +[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +Jun 18 07:51:33.613: INFO: PodSpec: initContainers in spec.initContainers +Jun 18 07:52:14.596: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-e435bb50-919d-11e9-bbf5-0e74dabf3615", GenerateName:"", Namespace:"e2e-tests-init-container-swk5g", SelfLink:"/api/v1/namespaces/e2e-tests-init-container-swk5g/pods/pod-init-e435bb50-919d-11e9-bbf5-0e74dabf3615", UID:"e438155a-919d-11e9-8cfd-00163e000a67", ResourceVersion:"13539402", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63696441093, loc:(*time.Location)(0x7b57be0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"613043337"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-mx8w7", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc000cee080), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), 
FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-mx8w7", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-mx8w7", ReadOnly:true, 
MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"reg.kpaas.io/pause:3.1", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-mx8w7", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc001a32e78), 
ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"node5", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc00114f860), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/memory-pressure", Operator:"Exists", Value:"", Effect:"NoSchedule", TolerationSeconds:(*int64)(nil)}, v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc001a32f10)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc001a32f30)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc001a32f38), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc001a32f3c)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696441094, loc:(*time.Location)(0x7b57be0)}}, Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696441094, loc:(*time.Location)(0x7b57be0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", 
LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696441094, loc:(*time.Location)(0x7b57be0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696441093, loc:(*time.Location)(0x7b57be0)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"192.168.2.155", PodIP:"171.171.33.154", StartTime:(*v1.Time)(0xc0017537a0), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0002d2ee0)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0002d3110)}, Ready:false, RestartCount:3, Image:"busybox:1.29", ImageID:"docker-pullable://busybox@sha256:8ccbac733d19c0dd4d70b4f0c1e12245b5fa3ad24758a11035ee505c629c0796", ContainerID:"docker://788f5b3385aa0c49f53f24a18db7510863614cee0889ff74a337c4d5fe6f5f64"}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc0017537e0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"docker.io/library/busybox:1.29", ImageID:"", ContainerID:""}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc0017537c0), Running:(*v1.ContainerStateRunning)(nil), 
Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"reg.kpaas.io/pause:3.1", ImageID:"", ContainerID:""}}, QOSClass:"Guaranteed"}} +[AfterEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:52:14.597: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-init-container-swk5g" for this suite. +Jun 18 07:52:38.612: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:52:38.749: INFO: namespace: e2e-tests-init-container-swk5g, resource: bindings, ignored listing per whitelist +Jun 18 07:52:39.541: INFO: namespace e2e-tests-init-container-swk5g deletion completed in 24.940087039s + +• [SLOW TEST:66.982 seconds] +[k8s.io] InitContainer [NodeConformance] +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-auth] ServiceAccounts + should allow opting out of API token automount [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-auth] ServiceAccounts + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 
+STEP: Creating a kubernetes client +Jun 18 07:52:39.541: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename svcaccounts +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-svcaccounts-f4nbn +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow opting out of API token automount [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: getting the auto-created API token +Jun 18 07:52:40.524: INFO: created pod pod-service-account-defaultsa +Jun 18 07:52:40.524: INFO: pod pod-service-account-defaultsa service account token volume mount: true +Jun 18 07:52:40.527: INFO: created pod pod-service-account-mountsa +Jun 18 07:52:40.527: INFO: pod pod-service-account-mountsa service account token volume mount: true +Jun 18 07:52:40.532: INFO: created pod pod-service-account-nomountsa +Jun 18 07:52:40.532: INFO: pod pod-service-account-nomountsa service account token volume mount: false +Jun 18 07:52:40.535: INFO: created pod pod-service-account-defaultsa-mountspec +Jun 18 07:52:40.535: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true +Jun 18 07:52:40.541: INFO: created pod pod-service-account-mountsa-mountspec +Jun 18 07:52:40.541: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true +Jun 18 07:52:40.548: INFO: created pod pod-service-account-nomountsa-mountspec +Jun 18 07:52:40.548: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true +Jun 18 07:52:40.553: INFO: created pod pod-service-account-defaultsa-nomountspec +Jun 18 07:52:40.553: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false +Jun 18 07:52:40.562: INFO: created pod pod-service-account-mountsa-nomountspec 
+Jun 18 07:52:40.562: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false +Jun 18 07:52:40.567: INFO: created pod pod-service-account-nomountsa-nomountspec +Jun 18 07:52:40.568: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false +[AfterEach] [sig-auth] ServiceAccounts + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:52:40.568: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-svcaccounts-f4nbn" for this suite. +Jun 18 07:53:08.614: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:53:09.685: INFO: namespace: e2e-tests-svcaccounts-f4nbn, resource: bindings, ignored listing per whitelist +Jun 18 07:53:09.703: INFO: namespace e2e-tests-svcaccounts-f4nbn deletion completed in 29.124696495s + +• [SLOW TEST:30.162 seconds] +[sig-auth] ServiceAccounts +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:22 + should allow opting out of API token automount [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-node] Downward API + should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-node] Downward API + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 
18 07:53:09.703: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-mccw9 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward api env vars +Jun 18 07:53:10.672: INFO: Waiting up to 5m0s for pod "downward-api-1e0ef06e-919e-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-mccw9" to be "success or failure" +Jun 18 07:53:10.676: INFO: Pod "downward-api-1e0ef06e-919e-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 3.881729ms +Jun 18 07:53:12.679: INFO: Pod "downward-api-1e0ef06e-919e-11e9-bbf5-0e74dabf3615": Phase="Running", Reason="", readiness=true. Elapsed: 2.007038152s +Jun 18 07:53:14.682: INFO: Pod "downward-api-1e0ef06e-919e-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.00958112s +STEP: Saw pod success +Jun 18 07:53:14.682: INFO: Pod "downward-api-1e0ef06e-919e-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:53:14.683: INFO: Trying to get logs from node node5 pod downward-api-1e0ef06e-919e-11e9-bbf5-0e74dabf3615 container dapi-container: +STEP: delete the pod +Jun 18 07:53:14.700: INFO: Waiting for pod downward-api-1e0ef06e-919e-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:53:14.703: INFO: Pod downward-api-1e0ef06e-919e-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-node] Downward API + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:53:14.703: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-mccw9" for this suite. +Jun 18 07:53:22.727: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:53:22.844: INFO: namespace: e2e-tests-downward-api-mccw9, resource: bindings, ignored listing per whitelist +Jun 18 07:53:23.545: INFO: namespace e2e-tests-downward-api-mccw9 deletion completed in 8.839434366s + +• [SLOW TEST:13.842 seconds] +[sig-node] Downward API +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38 + should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] Projected configMap + updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] 
[sig-storage] Projected configMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:53:23.545: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-zqjkn +STEP: Waiting for a default service account to be provisioned in namespace +[It] updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating projection with configMap that has name projected-configmap-test-upd-266587eb-919e-11e9-bbf5-0e74dabf3615 +STEP: Creating the pod +STEP: Updating configmap projected-configmap-test-upd-266587eb-919e-11e9-bbf5-0e74dabf3615 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:53:30.704: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-zqjkn" for this suite. 
+Jun 18 07:53:56.717: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:53:56.733: INFO: namespace: e2e-tests-projected-zqjkn, resource: bindings, ignored listing per whitelist +Jun 18 07:53:57.566: INFO: namespace e2e-tests-projected-zqjkn deletion completed in 26.858040699s + +• [SLOW TEST:34.021 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:53:57.566: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-2xshc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0666 on tmpfs +Jun 18 07:53:58.635: 
INFO: Waiting up to 5m0s for pod "pod-3a968a4c-919e-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-2xshc" to be "success or failure" +Jun 18 07:53:58.641: INFO: Pod "pod-3a968a4c-919e-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 5.339677ms +Jun 18 07:54:00.643: INFO: Pod "pod-3a968a4c-919e-11e9-bbf5-0e74dabf3615": Phase="Running", Reason="", readiness=true. Elapsed: 2.007917093s +Jun 18 07:54:02.649: INFO: Pod "pod-3a968a4c-919e-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013405129s +STEP: Saw pod success +Jun 18 07:54:02.649: INFO: Pod "pod-3a968a4c-919e-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:54:02.652: INFO: Trying to get logs from node node5 pod pod-3a968a4c-919e-11e9-bbf5-0e74dabf3615 container test-container: +STEP: delete the pod +Jun 18 07:54:02.669: INFO: Waiting for pod pod-3a968a4c-919e-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:54:02.683: INFO: Pod pod-3a968a4c-919e-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:54:02.683: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-2xshc" for this suite. 
+Jun 18 07:54:10.703: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:54:10.748: INFO: namespace: e2e-tests-emptydir-2xshc, resource: bindings, ignored listing per whitelist +Jun 18 07:54:11.557: INFO: namespace e2e-tests-emptydir-2xshc deletion completed in 8.865942155s + +• [SLOW TEST:13.991 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (non-root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run --rm job + should create a job from an image, then delete the job [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:54:11.557: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-ffghd +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should create a job from an image, then delete the job [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: executing a command with run --rm and attach with stdin +Jun 18 07:54:11.740: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 --namespace=e2e-tests-kubectl-ffghd run e2e-test-rm-busybox-job --image=docker.io/library/busybox:1.29 --rm=true --generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed'' +Jun 18 07:54:15.819: INFO: stderr: "kubectl run --generator=job/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\nIf you don't see a command prompt, try pressing enter.\n" +Jun 18 07:54:15.819: INFO: stdout: "abcd1234stdin closed\njob.batch \"e2e-test-rm-busybox-job\" deleted\n" +STEP: verifying the job e2e-test-rm-busybox-job was deleted +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:54:17.824: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-ffghd" for this suite. 
+Jun 18 07:54:25.837: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:54:26.618: INFO: namespace: e2e-tests-kubectl-ffghd, resource: bindings, ignored listing per whitelist +Jun 18 07:54:26.816: INFO: namespace e2e-tests-kubectl-ffghd deletion completed in 8.988644264s + +• [SLOW TEST:15.259 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl run --rm job + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create a job from an image, then delete the job [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] Downward API volume + should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:54:26.816: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-jtmpv +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +Jun 18 07:54:27.707: INFO: Waiting up to 5m0s for pod "downwardapi-volume-4bf99818-919e-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-jtmpv" to be "success or failure" +Jun 18 07:54:27.709: INFO: Pod "downwardapi-volume-4bf99818-919e-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.233159ms +Jun 18 07:54:29.712: INFO: Pod "downwardapi-volume-4bf99818-919e-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.00489829s +Jun 18 07:54:31.714: INFO: Pod "downwardapi-volume-4bf99818-919e-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.007455843s +STEP: Saw pod success +Jun 18 07:54:31.714: INFO: Pod "downwardapi-volume-4bf99818-919e-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:54:31.719: INFO: Trying to get logs from node node5 pod downwardapi-volume-4bf99818-919e-11e9-bbf5-0e74dabf3615 container client-container: +STEP: delete the pod +Jun 18 07:54:31.731: INFO: Waiting for pod downwardapi-volume-4bf99818-919e-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:54:31.734: INFO: Pod downwardapi-volume-4bf99818-919e-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:54:31.734: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-jtmpv" for this suite. +Jun 18 07:54:37.747: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:54:38.654: INFO: namespace: e2e-tests-downward-api-jtmpv, resource: bindings, ignored listing per whitelist +Jun 18 07:54:39.526: INFO: namespace e2e-tests-downward-api-jtmpv deletion completed in 7.788266772s + +• [SLOW TEST:12.710 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSS +------------------------------ +[sig-network] Proxy version v1 + should proxy through a service and a pod [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] version v1 + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:54:39.526: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename proxy +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-proxy-cgmt2 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should proxy through a service and a pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: starting an echo server on multiple ports +STEP: creating replication controller proxy-service-qnlnc in namespace e2e-tests-proxy-cgmt2 +I0618 07:54:40.555299 16 runners.go:184] Created replication controller with name: proxy-service-qnlnc, namespace: e2e-tests-proxy-cgmt2, replica count: 1 +I0618 07:54:41.605684 16 runners.go:184] proxy-service-qnlnc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0618 07:54:42.605943 16 runners.go:184] proxy-service-qnlnc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0618 07:54:43.606154 16 runners.go:184] proxy-service-qnlnc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0618 07:54:44.606349 16 runners.go:184] proxy-service-qnlnc Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0618 07:54:45.606557 16 runners.go:184] proxy-service-qnlnc Pods: 1 out of 1 
created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0618 07:54:46.606819 16 runners.go:184] proxy-service-qnlnc Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0618 07:54:47.607063 16 runners.go:184] proxy-service-qnlnc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Jun 18 07:54:47.610: INFO: setup took 7.082264257s, starting test cases +STEP: running 16 cases, 20 attempts per case, 320 total attempts +Jun 18 07:54:47.621: INFO: (0) /api/v1/namespaces/e2e-tests-proxy-cgmt2/pods/proxy-service-qnlnc-2t6n2:162/proxy/: bar (200; 10.608378ms) +Jun 18 07:54:47.624: INFO: (0) /api/v1/namespaces/e2e-tests-proxy-cgmt2/pods/http:proxy-service-qnlnc-2t6n2:160/proxy/: foo (200; 13.576145ms) +Jun 18 07:54:47.624: INFO: (0) /api/v1/namespaces/e2e-tests-proxy-cgmt2/services/http:proxy-service-qnlnc:portname2/proxy/: bar (200; 13.748527ms) +Jun 18 07:54:47.624: INFO: (0) /api/v1/namespaces/e2e-tests-proxy-cgmt2/pods/http:proxy-service-qnlnc-2t6n2:162/proxy/: bar (200; 13.483561ms) +Jun 18 07:54:47.650: INFO: (0) /api/v1/namespaces/e2e-tests-proxy-cgmt2/services/http:proxy-service-qnlnc:portname1/proxy/: foo (200; 39.935962ms) +Jun 18 07:54:47.654: INFO: (0) /api/v1/namespaces/e2e-tests-proxy-cgmt2/services/proxy-service-qnlnc:portname1/proxy/: foo (200; 44.097549ms) +Jun 18 07:54:47.657: INFO: (0) /api/v1/namespaces/e2e-tests-proxy-cgmt2/services/proxy-service-qnlnc:portname2/proxy/: bar (200; 47.322923ms) +Jun 18 07:54:47.657: INFO: (0) /api/v1/namespaces/e2e-tests-proxy-cgmt2/pods/proxy-service-qnlnc-2t6n2/proxy/: >> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-chkl2 +STEP: Waiting for a default service account to be 
provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name projected-configmap-test-volume-62835953-919e-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume configMaps +Jun 18 07:55:05.530: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-6284bbe7-919e-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-chkl2" to be "success or failure" +Jun 18 07:55:05.543: INFO: Pod "pod-projected-configmaps-6284bbe7-919e-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 12.295286ms +Jun 18 07:55:07.546: INFO: Pod "pod-projected-configmaps-6284bbe7-919e-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015287663s +Jun 18 07:55:09.555: INFO: Pod "pod-projected-configmaps-6284bbe7-919e-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.024674967s +STEP: Saw pod success +Jun 18 07:55:09.555: INFO: Pod "pod-projected-configmaps-6284bbe7-919e-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:55:09.558: INFO: Trying to get logs from node node5 pod pod-projected-configmaps-6284bbe7-919e-11e9-bbf5-0e74dabf3615 container projected-configmap-volume-test: +STEP: delete the pod +Jun 18 07:55:09.573: INFO: Waiting for pod pod-projected-configmaps-6284bbe7-919e-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:55:09.576: INFO: Pod pod-projected-configmaps-6284bbe7-919e-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:55:09.576: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-chkl2" for this suite. +Jun 18 07:55:17.593: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:55:17.700: INFO: namespace: e2e-tests-projected-chkl2, resource: bindings, ignored listing per whitelist +Jun 18 07:55:18.597: INFO: namespace e2e-tests-projected-chkl2 deletion completed in 9.016164424s + +• [SLOW TEST:14.967 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Deployment + RollingUpdateDeployment should delete old pods and create new ones [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:55:18.597: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename deployment +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-deployment-9h6cl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65 +[It] RollingUpdateDeployment should delete old pods and create new ones [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +Jun 18 07:55:19.683: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted) +Jun 18 07:55:20.620: INFO: Pod name sample-pod: Found 0 pods out of 1 +Jun 18 07:55:25.622: INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +Jun 18 07:55:25.622: INFO: Creating deployment "test-rolling-update-deployment" +Jun 18 07:55:25.628: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has +Jun 18 07:55:25.634: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created +Jun 18 07:55:27.642: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected +Jun 18 07:55:27.643: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set 
(the one it adopted) +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59 +Jun 18 07:55:27.653: INFO: Deployment "test-rolling-update-deployment": +&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment,GenerateName:,Namespace:e2e-tests-deployment-9h6cl,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-9h6cl/deployments/test-rolling-update-deployment,UID:6e7f4f2c-919e-11e9-8cfd-00163e000a67,ResourceVersion:13540750,Generation:1,CreationTimestamp:2019-06-18 07:55:25 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-06-18 07:55:25 +0000 UTC 2019-06-18 07:55:25 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-18 07:55:27 +0000 UTC 2019-06-18 07:55:25 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rolling-update-deployment-688cb96bf4" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},} + +Jun 18 07:55:27.657: INFO: New ReplicaSet "test-rolling-update-deployment-688cb96bf4" of Deployment "test-rolling-update-deployment": +&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-688cb96bf4,GenerateName:,Namespace:e2e-tests-deployment-9h6cl,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-9h6cl/replicasets/test-rolling-update-deployment-688cb96bf4,UID:6e8172c1-919e-11e9-8cfd-00163e000a67,ResourceVersion:13540741,Generation:1,CreationTimestamp:2019-06-18 07:55:25 +0000 
UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 688cb96bf4,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment 6e7f4f2c-919e-11e9-8cfd-00163e000a67 0xc002271217 0xc002271218}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 688cb96bf4,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 688cb96bf4,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},} +Jun 18 07:55:27.657: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment": +Jun 18 07:55:27.657: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-controller,GenerateName:,Namespace:e2e-tests-deployment-9h6cl,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-9h6cl/replicasets/test-rolling-update-controller,UID:6af5793d-919e-11e9-8cfd-00163e000a67,ResourceVersion:13540749,Generation:2,CreationTimestamp:2019-06-18 07:55:19 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305832,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment 6e7f4f2c-919e-11e9-8cfd-00163e000a67 0xc002271157 0xc002271158}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: 
nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +Jun 18 07:55:27.665: INFO: Pod "test-rolling-update-deployment-688cb96bf4-22qr5" is available: 
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-688cb96bf4-22qr5,GenerateName:test-rolling-update-deployment-688cb96bf4-,Namespace:e2e-tests-deployment-9h6cl,SelfLink:/api/v1/namespaces/e2e-tests-deployment-9h6cl/pods/test-rolling-update-deployment-688cb96bf4-22qr5,UID:6e81e5bb-919e-11e9-8cfd-00163e000a67,ResourceVersion:13540740,Generation:0,CreationTimestamp:2019-06-18 07:55:25 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 688cb96bf4,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-rolling-update-deployment-688cb96bf4 6e8172c1-919e-11e9-8cfd-00163e000a67 0xc002172687 0xc002172688}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-nmxgc {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-nmxgc,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [{default-token-nmxgc true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc002172700} {node.kubernetes.io/unreachable Exists NoExecute 
0xc002172720}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:55:25 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:55:27 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:55:27 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 07:55:25 +0000 UTC }],Message:,Reason:,HostIP:192.168.2.155,PodIP:171.171.33.168,StartTime:2019-06-18 07:55:25 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-06-18 07:55:26 +0000 UTC,} nil} {nil nil nil} true 0 reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://reg.kpaas.io/kubernetes-e2e-test-images/redis@sha256:2238f5a02d2648d41cc94a88f084060fbfa860890220328eb92696bf2ac649c9 docker://df6a12cf127f08394cc145bc5b3b5ac7da12bdd7e21a1f54cc9d0d3835ea1e04}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:55:27.665: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-deployment-9h6cl" for this suite. 
+Jun 18 07:55:35.684: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:55:35.749: INFO: namespace: e2e-tests-deployment-9h6cl, resource: bindings, ignored listing per whitelist +Jun 18 07:55:36.530: INFO: namespace e2e-tests-deployment-9h6cl deletion completed in 8.855621604s + +• [SLOW TEST:17.932 seconds] +[sig-apps] Deployment +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + RollingUpdateDeployment should delete old pods and create new ones [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-api-machinery] Garbage collector + should orphan pods created by rc if delete options say so [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:55:36.530: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename gc +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-97b99 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should orphan pods created by rc if delete options say so [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the rc +STEP: delete the rc +STEP: wait for the rc to be deleted +STEP: wait for 30 seconds to see if the garbage collector 
mistakenly deletes the pods +STEP: Gathering metrics +W0618 07:56:17.559181 16 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled. +Jun 18 07:56:17.559: INFO: For apiserver_request_count: +For apiserver_request_latencies_summary: +For etcd_helper_cache_entry_count: +For etcd_helper_cache_hit_count: +For etcd_helper_cache_miss_count: +For etcd_request_cache_add_latencies_summary: +For etcd_request_cache_get_latencies_summary: +For etcd_request_latencies_summary: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:56:17.559: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-gc-97b99" for this suite. 
+Jun 18 07:56:31.583: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:56:32.530: INFO: namespace: e2e-tests-gc-97b99, resource: bindings, ignored listing per whitelist +Jun 18 07:56:32.579: INFO: namespace e2e-tests-gc-97b99 deletion completed in 15.014520264s + +• [SLOW TEST:56.050 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should orphan pods created by rc if delete options say so [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] Downward API volume + should set mode on item file [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:56:32.579: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-lqzhd +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should set mode on item file [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +Jun 18 07:56:33.536: INFO: Waiting up to 5m0s for pod "downwardapi-volume-96f83db5-919e-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-lqzhd" to be "success or failure" +Jun 18 07:56:33.548: INFO: Pod "downwardapi-volume-96f83db5-919e-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 12.236914ms +Jun 18 07:56:35.564: INFO: Pod "downwardapi-volume-96f83db5-919e-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027634391s +Jun 18 07:56:37.567: INFO: Pod "downwardapi-volume-96f83db5-919e-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 4.030716653s +Jun 18 07:56:40.589: INFO: Pod "downwardapi-volume-96f83db5-919e-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 7.053134394s +STEP: Saw pod success +Jun 18 07:56:40.589: INFO: Pod "downwardapi-volume-96f83db5-919e-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:56:40.607: INFO: Trying to get logs from node node5 pod downwardapi-volume-96f83db5-919e-11e9-bbf5-0e74dabf3615 container client-container: +STEP: delete the pod +Jun 18 07:56:40.651: INFO: Waiting for pod downwardapi-volume-96f83db5-919e-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:56:40.663: INFO: Pod downwardapi-volume-96f83db5-919e-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:56:40.663: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-lqzhd" for this suite. 
+Jun 18 07:56:49.549: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:56:49.664: INFO: namespace: e2e-tests-downward-api-lqzhd, resource: bindings, ignored listing per whitelist +Jun 18 07:56:49.864: INFO: namespace e2e-tests-downward-api-lqzhd deletion completed in 9.195397018s + +• [SLOW TEST:17.285 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should set mode on item file [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[k8s.io] Docker Containers + should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Docker Containers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:56:49.864: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename containers +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-containers-r9mtw +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test 
override command +Jun 18 07:56:50.618: INFO: Waiting up to 5m0s for pod "client-containers-a1282067-919e-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-containers-r9mtw" to be "success or failure" +Jun 18 07:56:50.631: INFO: Pod "client-containers-a1282067-919e-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 12.995696ms +Jun 18 07:56:52.633: INFO: Pod "client-containers-a1282067-919e-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015499695s +Jun 18 07:56:54.636: INFO: Pod "client-containers-a1282067-919e-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017887138s +STEP: Saw pod success +Jun 18 07:56:54.636: INFO: Pod "client-containers-a1282067-919e-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 07:56:54.638: INFO: Trying to get logs from node node5 pod client-containers-a1282067-919e-11e9-bbf5-0e74dabf3615 container test-container: +STEP: delete the pod +Jun 18 07:56:54.649: INFO: Waiting for pod client-containers-a1282067-919e-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 07:56:54.652: INFO: Pod client-containers-a1282067-919e-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [k8s.io] Docker Containers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:56:54.652: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-containers-r9mtw" for this suite. 
+Jun 18 07:57:02.667: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:57:03.522: INFO: namespace: e2e-tests-containers-r9mtw, resource: bindings, ignored listing per whitelist +Jun 18 07:57:03.568: INFO: namespace e2e-tests-containers-r9mtw deletion completed in 8.91240512s + +• [SLOW TEST:13.704 seconds] +[k8s.io] Docker Containers +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:57:03.568: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename statefulset +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-statefulset-72fm8 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+ /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace e2e-tests-statefulset-72fm8 +[It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Initializing watcher for selector baz=blah,foo=bar +STEP: Creating stateful set ss in namespace e2e-tests-statefulset-72fm8 +STEP: Waiting until all stateful set ss replicas will be running in namespace e2e-tests-statefulset-72fm8 +Jun 18 07:57:04.532: INFO: Found 0 stateful pods, waiting for 1 +Jun 18 07:57:14.536: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod +Jun 18 07:57:14.538: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-72fm8 ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 18 07:57:14.695: INFO: stderr: "" +Jun 18 07:57:14.695: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 18 07:57:14.695: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 18 07:57:14.697: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true +Jun 18 07:57:24.700: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Jun 18 07:57:24.700: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 18 07:57:24.710: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999384s +Jun 18 07:57:25.713: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.997537947s +Jun 18 
07:57:26.716: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.994123791s +Jun 18 07:57:27.725: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.991587388s +Jun 18 07:57:28.728: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.982799281s +Jun 18 07:57:29.731: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.97931743s +Jun 18 07:57:30.734: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.976516464s +Jun 18 07:57:31.737: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.973581191s +Jun 18 07:57:32.740: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.97061773s +Jun 18 07:57:33.744: INFO: Verifying statefulset ss doesn't scale past 1 for another 967.02937ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace e2e-tests-statefulset-72fm8 +Jun 18 07:57:34.747: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-72fm8 ss-0 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 18 07:57:34.892: INFO: stderr: "" +Jun 18 07:57:34.892: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 18 07:57:34.893: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 18 07:57:34.895: INFO: Found 1 stateful pods, waiting for 3 +Jun 18 07:57:44.898: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +Jun 18 07:57:44.898: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +Jun 18 07:57:44.899: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Verifying that stateful set ss was scaled up in order +STEP: Scale down will halt with unhealthy stateful pod +Jun 18 07:57:44.903: INFO: Running 
'/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-72fm8 ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 18 07:57:45.062: INFO: stderr: "" +Jun 18 07:57:45.062: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 18 07:57:45.062: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 18 07:57:45.062: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-72fm8 ss-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 18 07:57:45.559: INFO: stderr: "" +Jun 18 07:57:45.559: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 18 07:57:45.559: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 18 07:57:45.559: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-72fm8 ss-2 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 18 07:57:45.706: INFO: stderr: "" +Jun 18 07:57:45.706: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 18 07:57:45.706: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 18 07:57:45.706: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 18 07:57:45.708: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 +Jun 18 07:57:55.712: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Jun 18 07:57:55.712: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false +Jun 18 07:57:55.712: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running 
- Ready=false +Jun 18 07:57:55.725: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.999999458s +Jun 18 07:57:56.728: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.992107476s +Jun 18 07:57:57.731: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.988989383s +Jun 18 07:57:58.734: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.986084186s +Jun 18 07:57:59.737: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.983255921s +Jun 18 07:58:00.741: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.979487541s +Jun 18 07:58:02.556: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.976129616s +Jun 18 07:58:03.562: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.161577602s +Jun 18 07:58:04.567: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.155948977s +Jun 18 07:58:05.570: INFO: Verifying statefulset ss doesn't scale past 3 for another 150.633589ms +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacee2e-tests-statefulset-72fm8 +Jun 18 07:58:06.574: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-72fm8 ss-0 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 18 07:58:06.774: INFO: stderr: "" +Jun 18 07:58:06.774: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 18 07:58:06.774: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 18 07:58:06.774: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-72fm8 ss-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 18 07:58:06.917: INFO: stderr: "" +Jun 18 07:58:06.918: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" 
+Jun 18 07:58:06.918: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 18 07:58:06.918: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-72fm8 ss-2 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 18 07:58:07.603: INFO: stderr: "" +Jun 18 07:58:07.603: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 18 07:58:07.603: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-2: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 18 07:58:07.603: INFO: Scaling statefulset ss to 0 +STEP: Verifying that stateful set ss was scaled down in reverse order +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +Jun 18 07:58:27.628: INFO: Deleting all statefulset in ns e2e-tests-statefulset-72fm8 +Jun 18 07:58:27.630: INFO: Scaling statefulset ss to 0 +Jun 18 07:58:27.637: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 18 07:58:27.641: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:58:27.668: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-statefulset-72fm8" for this suite. 
+Jun 18 07:58:35.691: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 07:58:35.768: INFO: namespace: e2e-tests-statefulset-72fm8, resource: bindings, ignored listing per whitelist +Jun 18 07:58:36.560: INFO: namespace e2e-tests-statefulset-72fm8 deletion completed in 8.888786659s + +• [SLOW TEST:92.993 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSS +------------------------------ +[k8s.io] Probing container + with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 07:58:36.561: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename container-probe +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-hgtrk +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48 +[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 07:59:37.563: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-probe-hgtrk" for this suite. +Jun 18 08:00:05.582: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:00:05.612: INFO: namespace: e2e-tests-container-probe-hgtrk, resource: bindings, ignored listing per whitelist +Jun 18 08:00:06.519: INFO: namespace e2e-tests-container-probe-hgtrk deletion completed in 28.95151254s + +• [SLOW TEST:89.958 seconds] +[k8s.io] Probing container +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] Projected downwardAPI + should set DefaultMode on files [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:00:06.519: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-w88r6 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should set DefaultMode on files [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +Jun 18 08:00:06.702: INFO: Waiting up to 5m0s for pod "downwardapi-volume-16079d56-919f-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-w88r6" to be "success or failure" +Jun 18 08:00:06.705: INFO: Pod "downwardapi-volume-16079d56-919f-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.51689ms +Jun 18 08:00:08.710: INFO: Pod "downwardapi-volume-16079d56-919f-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007716719s +Jun 18 08:00:10.712: INFO: Pod "downwardapi-volume-16079d56-919f-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.010052018s +STEP: Saw pod success +Jun 18 08:00:10.712: INFO: Pod "downwardapi-volume-16079d56-919f-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 08:00:10.714: INFO: Trying to get logs from node node5 pod downwardapi-volume-16079d56-919f-11e9-bbf5-0e74dabf3615 container client-container: +STEP: delete the pod +Jun 18 08:00:10.730: INFO: Waiting for pod downwardapi-volume-16079d56-919f-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 08:00:10.731: INFO: Pod downwardapi-volume-16079d56-919f-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:00:10.731: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-w88r6" for this suite. +Jun 18 08:00:19.518: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:00:19.594: INFO: namespace: e2e-tests-projected-w88r6, resource: bindings, ignored listing per whitelist +Jun 18 08:00:20.560: INFO: namespace e2e-tests-projected-w88r6 deletion completed in 9.815856685s + +• [SLOW TEST:14.041 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should set DefaultMode on files [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSS +------------------------------ +[sig-api-machinery] Watchers + should be able to start watching from a specific resource version [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:00:20.560: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename watch +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-watch-8d7kv +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to start watching from a specific resource version [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: modifying the configmap a second time +STEP: deleting the configmap +STEP: creating a watch on configmaps from the resource version returned by the first update +STEP: Expecting to observe notifications for all changes to the configmap after the first update +Jun 18 08:00:21.540: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:e2e-tests-watch-8d7kv,SelfLink:/api/v1/namespaces/e2e-tests-watch-8d7kv/configmaps/e2e-watch-test-resource-version,UID:1e675c50-919f-11e9-8cfd-00163e000a67,ResourceVersion:13542723,Generation:0,CreationTimestamp:2019-06-18 08:00:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +Jun 18 
08:00:21.540: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:e2e-tests-watch-8d7kv,SelfLink:/api/v1/namespaces/e2e-tests-watch-8d7kv/configmaps/e2e-watch-test-resource-version,UID:1e675c50-919f-11e9-8cfd-00163e000a67,ResourceVersion:13542724,Generation:0,CreationTimestamp:2019-06-18 08:00:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +[AfterEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:00:21.540: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-watch-8d7kv" for this suite. 
+Jun 18 08:00:29.556: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:00:29.663: INFO: namespace: e2e-tests-watch-8d7kv, resource: bindings, ignored listing per whitelist +Jun 18 08:00:30.555: INFO: namespace e2e-tests-watch-8d7kv deletion completed in 9.010547884s + +• [SLOW TEST:9.995 seconds] +[sig-api-machinery] Watchers +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should be able to start watching from a specific resource version [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[k8s.io] Probing container + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:00:30.555: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename container-probe +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-v42x8 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48 +[It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod liveness-exec in namespace e2e-tests-container-probe-v42x8 +Jun 18 08:00:33.548: INFO: Started pod liveness-exec in namespace e2e-tests-container-probe-v42x8 +STEP: checking the pod's current state and verifying that restartCount is present +Jun 18 08:00:33.552: INFO: Initial restart count of pod liveness-exec is 0 +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:04:34.706: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-probe-v42x8" for this suite. +Jun 18 08:04:42.720: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:04:42.803: INFO: namespace: e2e-tests-container-probe-v42x8, resource: bindings, ignored listing per whitelist +Jun 18 08:04:43.031: INFO: namespace e2e-tests-container-probe-v42x8 deletion completed in 8.322315552s + +• [SLOW TEST:252.476 seconds] +[k8s.io] Probing container +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl label + should update the label on a resource [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] 
Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:04:43.032: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-qg42c +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl label + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1052 +STEP: creating the pod +Jun 18 08:04:43.694: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-qg42c' +Jun 18 08:04:44.842: INFO: stderr: "" +Jun 18 08:04:44.842: INFO: stdout: "pod/pause created\n" +Jun 18 08:04:44.842: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause] +Jun 18 08:04:44.842: INFO: Waiting up to 5m0s for pod "pause" in namespace "e2e-tests-kubectl-qg42c" to be "running and ready" +Jun 18 08:04:44.848: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 5.634826ms +Jun 18 08:04:46.851: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 2.00847946s +Jun 18 08:04:46.851: INFO: Pod "pause" satisfied condition "running and ready" +Jun 18 08:04:46.851: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [pause] +[It] should update the label on a resource [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: adding the label testing-label with value testing-label-value to a pod +Jun 18 08:04:46.851: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 label pods pause testing-label=testing-label-value --namespace=e2e-tests-kubectl-qg42c' +Jun 18 08:04:46.944: INFO: stderr: "" +Jun 18 08:04:46.944: INFO: stdout: "pod/pause labeled\n" +STEP: verifying the pod has the label testing-label with the value testing-label-value +Jun 18 08:04:46.944: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pod pause -L testing-label --namespace=e2e-tests-kubectl-qg42c' +Jun 18 08:04:47.023: INFO: stderr: "" +Jun 18 08:04:47.023: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 3s testing-label-value\n" +STEP: removing the label testing-label of a pod +Jun 18 08:04:47.023: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 label pods pause testing-label- --namespace=e2e-tests-kubectl-qg42c' +Jun 18 08:04:47.108: INFO: stderr: "" +Jun 18 08:04:47.108: INFO: stdout: "pod/pause labeled\n" +STEP: verifying the pod doesn't have the label testing-label +Jun 18 08:04:47.108: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pod pause -L testing-label --namespace=e2e-tests-kubectl-qg42c' +Jun 18 08:04:47.189: INFO: stderr: "" +Jun 18 08:04:47.189: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 3s \n" +[AfterEach] [k8s.io] Kubectl label + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1059 +STEP: using delete to clean up resources +Jun 18 08:04:47.189: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-656024001 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-qg42c' +Jun 18 08:04:47.527: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 18 08:04:47.527: INFO: stdout: "pod \"pause\" force deleted\n" +Jun 18 08:04:47.527: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get rc,svc -l name=pause --no-headers --namespace=e2e-tests-kubectl-qg42c' +Jun 18 08:04:47.615: INFO: stderr: "No resources found.\n" +Jun 18 08:04:47.615: INFO: stdout: "" +Jun 18 08:04:47.615: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -l name=pause --namespace=e2e-tests-kubectl-qg42c -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Jun 18 08:04:47.708: INFO: stderr: "" +Jun 18 08:04:47.708: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:04:47.708: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-qg42c" for this suite. 
+Jun 18 08:04:55.722: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:04:56.538: INFO: namespace: e2e-tests-kubectl-qg42c, resource: bindings, ignored listing per whitelist +Jun 18 08:04:56.622: INFO: namespace e2e-tests-kubectl-qg42c deletion completed in 8.910029163s + +• [SLOW TEST:13.591 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl label + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should update the label on a resource [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] Downward API volume + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:04:56.622: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-9p62h +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +Jun 18 08:04:56.794: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c2f0eee5-919f-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-9p62h" to be "success or failure" +Jun 18 08:04:56.797: INFO: Pod "downwardapi-volume-c2f0eee5-919f-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.78803ms +Jun 18 08:04:58.799: INFO: Pod "downwardapi-volume-c2f0eee5-919f-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.00555592s +Jun 18 08:05:00.803: INFO: Pod "downwardapi-volume-c2f0eee5-919f-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.008858683s +STEP: Saw pod success +Jun 18 08:05:00.803: INFO: Pod "downwardapi-volume-c2f0eee5-919f-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 08:05:00.805: INFO: Trying to get logs from node node5 pod downwardapi-volume-c2f0eee5-919f-11e9-bbf5-0e74dabf3615 container client-container: +STEP: delete the pod +Jun 18 08:05:00.818: INFO: Waiting for pod downwardapi-volume-c2f0eee5-919f-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 08:05:00.821: INFO: Pod downwardapi-volume-c2f0eee5-919f-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:05:00.821: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-9p62h" for this suite. +Jun 18 08:05:07.513: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:05:07.770: INFO: namespace: e2e-tests-downward-api-9p62h, resource: bindings, ignored listing per whitelist +Jun 18 08:05:07.828: INFO: namespace e2e-tests-downward-api-9p62h deletion completed in 6.998289325s + +• [SLOW TEST:11.206 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-storage] Projected configMap + optional updates should be reflected in volume [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:05:07.828: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-qxj6r +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name cm-test-opt-del-ca070f68-919f-11e9-bbf5-0e74dabf3615 +STEP: Creating configMap with name cm-test-opt-upd-ca070fa3-919f-11e9-bbf5-0e74dabf3615 +STEP: Creating the pod +STEP: Deleting configmap cm-test-opt-del-ca070f68-919f-11e9-bbf5-0e74dabf3615 +STEP: Updating configmap cm-test-opt-upd-ca070fa3-919f-11e9-bbf5-0e74dabf3615 +STEP: Creating configMap with name cm-test-opt-create-ca070fb5-919f-11e9-bbf5-0e74dabf3615 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:05:15.657: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-qxj6r" for this suite. 
+Jun 18 08:05:39.669: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:05:39.688: INFO: namespace: e2e-tests-projected-qxj6r, resource: bindings, ignored listing per whitelist +Jun 18 08:05:40.513: INFO: namespace e2e-tests-projected-qxj6r deletion completed in 24.852656703s + +• [SLOW TEST:32.685 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[k8s.io] Variable Expansion + should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Variable Expansion + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:05:40.514: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename var-expansion +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-var-expansion-2dc4l +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test substitution in 
container's args +Jun 18 08:05:40.697: INFO: Waiting up to 5m0s for pod "var-expansion-dd1b7166-919f-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-var-expansion-2dc4l" to be "success or failure" +Jun 18 08:05:40.701: INFO: Pod "var-expansion-dd1b7166-919f-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 4.184983ms +Jun 18 08:05:43.524: INFO: Pod "var-expansion-dd1b7166-919f-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.826781351s +Jun 18 08:05:45.533: INFO: Pod "var-expansion-dd1b7166-919f-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.835611506s +STEP: Saw pod success +Jun 18 08:05:45.533: INFO: Pod "var-expansion-dd1b7166-919f-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 08:05:45.539: INFO: Trying to get logs from node node5 pod var-expansion-dd1b7166-919f-11e9-bbf5-0e74dabf3615 container dapi-container: +STEP: delete the pod +Jun 18 08:05:45.559: INFO: Waiting for pod var-expansion-dd1b7166-919f-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 08:05:45.564: INFO: Pod var-expansion-dd1b7166-919f-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [k8s.io] Variable Expansion + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:05:45.564: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-var-expansion-2dc4l" for this suite. 
+Jun 18 08:05:53.589: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:05:54.555: INFO: namespace: e2e-tests-var-expansion-2dc4l, resource: bindings, ignored listing per whitelist +Jun 18 08:05:54.575: INFO: namespace e2e-tests-var-expansion-2dc4l deletion completed in 9.005708088s + +• [SLOW TEST:14.061 seconds] +[k8s.io] Variable Expansion +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourceDefinition resources Simple CustomResourceDefinition + creating/deleting custom resource definition objects works [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:05:54.575: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename custom-resource-definition +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-custom-resource-definition-bs586 +STEP: Waiting for a default service account to be provisioned in namespace +[It] creating/deleting custom resource definition objects works [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +Jun 18 08:05:56.533: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:05:59.530: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-custom-resource-definition-bs586" for this suite. +Jun 18 08:06:09.565: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:06:10.549: INFO: namespace: e2e-tests-custom-resource-definition-bs586, resource: bindings, ignored listing per whitelist +Jun 18 08:06:11.564: INFO: namespace e2e-tests-custom-resource-definition-bs586 deletion completed in 12.028572617s + +• [SLOW TEST:16.989 seconds] +[sig-api-machinery] CustomResourceDefinition resources +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + Simple CustomResourceDefinition + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go:35 + creating/deleting custom resource definition objects works [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's cpu request [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:06:11.564: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-4pt28 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +Jun 18 08:06:12.521: INFO: Waiting up to 5m0s for pod "downwardapi-volume-f0132e91-919f-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-4pt28" to be "success or failure" +Jun 18 08:06:12.523: INFO: Pod "downwardapi-volume-f0132e91-919f-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 1.93306ms +Jun 18 08:06:14.530: INFO: Pod "downwardapi-volume-f0132e91-919f-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009291924s +Jun 18 08:06:16.537: INFO: Pod "downwardapi-volume-f0132e91-919f-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.016327099s +STEP: Saw pod success +Jun 18 08:06:16.537: INFO: Pod "downwardapi-volume-f0132e91-919f-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 08:06:16.548: INFO: Trying to get logs from node node5 pod downwardapi-volume-f0132e91-919f-11e9-bbf5-0e74dabf3615 container client-container: +STEP: delete the pod +Jun 18 08:06:16.574: INFO: Waiting for pod downwardapi-volume-f0132e91-919f-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 08:06:16.585: INFO: Pod downwardapi-volume-f0132e91-919f-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:06:16.585: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-4pt28" for this suite. +Jun 18 08:06:26.643: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:06:28.536: INFO: namespace: e2e-tests-projected-4pt28, resource: bindings, ignored listing per whitelist +Jun 18 08:06:28.536: INFO: namespace e2e-tests-projected-4pt28 deletion completed in 11.945386996s + +• [SLOW TEST:16.971 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl api-versions + should check if v1 is in available api versions [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:06:28.536: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-qxd9c +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should check if v1 is in available api versions [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: validating api versions +Jun 18 08:06:29.509: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 api-versions' +Jun 18 08:06:29.634: INFO: stderr: "" +Jun 18 08:06:29.634: INFO: stdout: 
"admissionregistration.k8s.io/v1beta1\nalerting.qiniu.com/v1alpha1\napiextensions.k8s.io/v1beta1\napiregistration.k8s.io/v1\napiregistration.k8s.io/v1beta1\napps/v1\napps/v1beta1\napps/v1beta2\nauthentication.k8s.io/v1\nauthentication.k8s.io/v1beta1\nauthorization.k8s.io/v1\nauthorization.k8s.io/v1beta1\nautoscaling/v1\nautoscaling/v2beta1\nautoscaling/v2beta2\nbatch/v1\nbatch/v1beta1\ncertificates.k8s.io/v1beta1\ncontour.heptio.com/v1beta1\ncoordination.k8s.io/v1beta1\ndummy.kirk.qiniu.com/v1alpha1\nevents.k8s.io/v1beta1\nextensions/v1beta1\nmongors.kirk.qiniu.com/v1\nmonitoring.coreos.com/v1\nmysql.kirk.qiniu.com/v1\nnet.kirkapis.qiniu.com/v1alpha1\nnetworking.k8s.io/v1\npolicy/v1beta1\nqiniuapp.kirkapis.qiniu.com/v1alpha1\nqiniutemplate.kirkapis.qiniu.com/v1alpha1\nrabbitmq.kirk.qiniu.com/v1\nrbac.authorization.k8s.io/v1\nrbac.authorization.k8s.io/v1beta1\nredis.kirk.qiniu.com/v1\nscheduling.k8s.io/v1beta1\nsnapshot.storage.k8s.io/v1alpha1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:06:29.634: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-qxd9c" for this suite. 
+Jun 18 08:06:39.658: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:06:39.703: INFO: namespace: e2e-tests-kubectl-qxd9c, resource: bindings, ignored listing per whitelist +Jun 18 08:06:40.567: INFO: namespace e2e-tests-kubectl-qxd9c deletion completed in 10.928312178s + +• [SLOW TEST:12.031 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl api-versions + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should check if v1 is in available api versions [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[k8s.io] Pods + should contain environment variables for services [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:06:40.567: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename pods +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-lrztn +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should contain 
environment variables for services [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +Jun 18 08:06:47.584: INFO: Waiting up to 5m0s for pod "client-envvars-04f95c95-91a0-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-pods-lrztn" to be "success or failure" +Jun 18 08:06:47.587: INFO: Pod "client-envvars-04f95c95-91a0-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 3.439393ms +Jun 18 08:06:49.631: INFO: Pod "client-envvars-04f95c95-91a0-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.046983751s +Jun 18 08:06:52.540: INFO: Pod "client-envvars-04f95c95-91a0-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.956317557s +STEP: Saw pod success +Jun 18 08:06:52.540: INFO: Pod "client-envvars-04f95c95-91a0-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 08:06:52.548: INFO: Trying to get logs from node node5 pod client-envvars-04f95c95-91a0-11e9-bbf5-0e74dabf3615 container env3cont: +STEP: delete the pod +Jun 18 08:06:52.638: INFO: Waiting for pod client-envvars-04f95c95-91a0-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 08:06:52.647: INFO: Pod client-envvars-04f95c95-91a0-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:06:52.647: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-lrztn" for this suite. 
+Jun 18 08:07:34.670: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:07:34.795: INFO: namespace: e2e-tests-pods-lrztn, resource: bindings, ignored listing per whitelist +Jun 18 08:07:35.519: INFO: namespace e2e-tests-pods-lrztn deletion completed in 42.866924567s + +• [SLOW TEST:54.952 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should contain environment variables for services [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:07:35.519: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-8z42c +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name 
configmap-test-volume-map-21abe846-91a0-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume configMaps +Jun 18 08:07:35.727: INFO: Waiting up to 5m0s for pod "pod-configmaps-21ac3ce0-91a0-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-configmap-8z42c" to be "success or failure" +Jun 18 08:07:35.728: INFO: Pod "pod-configmaps-21ac3ce0-91a0-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 1.455123ms +Jun 18 08:07:37.732: INFO: Pod "pod-configmaps-21ac3ce0-91a0-11e9-bbf5-0e74dabf3615": Phase="Running", Reason="", readiness=true. Elapsed: 2.004552612s +Jun 18 08:07:39.750: INFO: Pod "pod-configmaps-21ac3ce0-91a0-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022919503s +STEP: Saw pod success +Jun 18 08:07:39.750: INFO: Pod "pod-configmaps-21ac3ce0-91a0-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 08:07:39.752: INFO: Trying to get logs from node node5 pod pod-configmaps-21ac3ce0-91a0-11e9-bbf5-0e74dabf3615 container configmap-volume-test: +STEP: delete the pod +Jun 18 08:07:39.765: INFO: Waiting for pod pod-configmaps-21ac3ce0-91a0-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 08:07:40.520: INFO: Pod pod-configmaps-21ac3ce0-91a0-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:07:40.520: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-8z42c" for this suite. 
+Jun 18 08:07:48.572: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:07:48.662: INFO: namespace: e2e-tests-configmap-8z42c, resource: bindings, ignored listing per whitelist +Jun 18 08:07:49.641: INFO: namespace e2e-tests-configmap-8z42c deletion completed in 9.112362422s + +• [SLOW TEST:14.122 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:07:49.641: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-rmlh7 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 
+STEP: Creating projection with secret that has name projected-secret-test-map-2a8f9a8d-91a0-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume secrets +Jun 18 08:07:50.642: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-2a8ffe7a-91a0-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-rmlh7" to be "success or failure" +Jun 18 08:07:50.647: INFO: Pod "pod-projected-secrets-2a8ffe7a-91a0-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 4.59793ms +Jun 18 08:07:52.649: INFO: Pod "pod-projected-secrets-2a8ffe7a-91a0-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007450141s +STEP: Saw pod success +Jun 18 08:07:52.650: INFO: Pod "pod-projected-secrets-2a8ffe7a-91a0-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 08:07:52.654: INFO: Trying to get logs from node node5 pod pod-projected-secrets-2a8ffe7a-91a0-11e9-bbf5-0e74dabf3615 container projected-secret-volume-test: +STEP: delete the pod +Jun 18 08:07:52.668: INFO: Waiting for pod pod-projected-secrets-2a8ffe7a-91a0-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 08:07:52.670: INFO: Pod pod-projected-secrets-2a8ffe7a-91a0-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:07:52.670: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-rmlh7" for this suite. 
+Jun 18 08:08:00.694: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:08:00.804: INFO: namespace: e2e-tests-projected-rmlh7, resource: bindings, ignored listing per whitelist +Jun 18 08:08:01.006: INFO: namespace e2e-tests-projected-rmlh7 deletion completed in 8.321814645s + +• [SLOW TEST:11.365 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34 + should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Secrets + should be consumable from pods in env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Secrets + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:08:01.007: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-lxq4q +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name 
secret-test-311d9513-91a0-11e9-bbf5-0e74dabf3615 +STEP: Creating a pod to test consume secrets +Jun 18 08:08:01.644: INFO: Waiting up to 5m0s for pod "pod-secrets-311e7cb5-91a0-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-secrets-lxq4q" to be "success or failure" +Jun 18 08:08:01.649: INFO: Pod "pod-secrets-311e7cb5-91a0-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 5.231587ms +Jun 18 08:08:03.652: INFO: Pod "pod-secrets-311e7cb5-91a0-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008009876s +STEP: Saw pod success +Jun 18 08:08:03.652: INFO: Pod "pod-secrets-311e7cb5-91a0-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 08:08:03.654: INFO: Trying to get logs from node node5 pod pod-secrets-311e7cb5-91a0-11e9-bbf5-0e74dabf3615 container secret-env-test: +STEP: delete the pod +Jun 18 08:08:03.670: INFO: Waiting for pod pod-secrets-311e7cb5-91a0-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 08:08:03.672: INFO: Pod pod-secrets-311e7cb5-91a0-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-api-machinery] Secrets + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:08:03.672: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-lxq4q" for this suite. 
+Jun 18 08:08:13.683: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:08:14.553: INFO: namespace: e2e-tests-secrets-lxq4q, resource: bindings, ignored listing per whitelist +Jun 18 08:08:14.555: INFO: namespace e2e-tests-secrets-lxq4q deletion completed in 10.880600352s + +• [SLOW TEST:13.549 seconds] +[sig-api-machinery] Secrets +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:32 + should be consumable from pods in env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:08:14.555: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-lifecycle-hook-blk7p +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61 +STEP: create the container to handle the 
HTTPGet hook request. +[It] should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the pod with lifecycle hook +STEP: check poststart hook +STEP: delete the pod with lifecycle hook +Jun 18 08:08:20.801: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 18 08:08:20.803: INFO: Pod pod-with-poststart-http-hook still exists +Jun 18 08:08:22.804: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 18 08:08:22.806: INFO: Pod pod-with-poststart-http-hook still exists +Jun 18 08:08:24.804: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 18 08:08:24.807: INFO: Pod pod-with-poststart-http-hook still exists +Jun 18 08:08:26.804: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 18 08:08:26.806: INFO: Pod pod-with-poststart-http-hook still exists +Jun 18 08:08:28.804: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 18 08:08:28.807: INFO: Pod pod-with-poststart-http-hook no longer exists +[AfterEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:08:28.807: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-blk7p" for this suite. 
+Jun 18 08:08:54.820: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:08:54.901: INFO: namespace: e2e-tests-container-lifecycle-hook-blk7p, resource: bindings, ignored listing per whitelist +Jun 18 08:08:55.133: INFO: namespace e2e-tests-container-lifecycle-hook-blk7p deletion completed in 26.322119164s + +• [SLOW TEST:40.577 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when create a pod with lifecycle hook + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40 + should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:08:55.133: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-9k6h2 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +Jun 18 08:08:55.677: INFO: Waiting up to 5m0s for pod "downwardapi-volume-515383c8-91a0-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-9k6h2" to be "success or failure" +Jun 18 08:08:55.683: INFO: Pod "downwardapi-volume-515383c8-91a0-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 5.62731ms +Jun 18 08:08:57.686: INFO: Pod "downwardapi-volume-515383c8-91a0-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008290779s +STEP: Saw pod success +Jun 18 08:08:57.686: INFO: Pod "downwardapi-volume-515383c8-91a0-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure" +Jun 18 08:08:57.687: INFO: Trying to get logs from node node5 pod downwardapi-volume-515383c8-91a0-11e9-bbf5-0e74dabf3615 container client-container: +STEP: delete the pod +Jun 18 08:08:57.702: INFO: Waiting for pod downwardapi-volume-515383c8-91a0-11e9-bbf5-0e74dabf3615 to disappear +Jun 18 08:08:57.704: INFO: Pod downwardapi-volume-515383c8-91a0-11e9-bbf5-0e74dabf3615 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:08:57.704: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-9k6h2" for this suite. 
+Jun 18 08:09:05.715: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:09:06.536: INFO: namespace: e2e-tests-projected-9k6h2, resource: bindings, ignored listing per whitelist +Jun 18 08:09:06.632: INFO: namespace e2e-tests-projected-9k6h2 deletion completed in 8.925422858s + +• [SLOW TEST:11.500 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[k8s.io] Pods + should be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:09:06.633: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename pods +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-ssqnx +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: 
creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: updating the pod +Jun 18 08:09:10.526: INFO: Successfully updated pod "pod-update-586482cd-91a0-11e9-bbf5-0e74dabf3615" +STEP: verifying the updated pod is in kubernetes +Jun 18 08:09:10.533: INFO: Pod update OK +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:09:10.533: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-ssqnx" for this suite. +Jun 18 08:09:34.554: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:09:34.636: INFO: namespace: e2e-tests-pods-ssqnx, resource: bindings, ignored listing per whitelist +Jun 18 08:09:35.526: INFO: namespace e2e-tests-pods-ssqnx deletion completed in 24.984320651s + +• [SLOW TEST:28.893 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with configmap pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:09:35.526: INFO: >>> 
kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename subpath +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-subpath-8xss4 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with configmap pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod pod-subpath-test-configmap-h9fv +STEP: Creating a pod to test atomic-volume-subpath +Jun 18 08:09:35.739: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-h9fv" in namespace "e2e-tests-subpath-8xss4" to be "success or failure" +Jun 18 08:09:35.742: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Pending", Reason="", readiness=false. Elapsed: 2.628331ms +Jun 18 08:09:38.511: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Pending", Reason="", readiness=false. Elapsed: 2.771477878s +Jun 18 08:09:40.516: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Running", Reason="", readiness=false. Elapsed: 4.776416732s +Jun 18 08:09:42.519: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Running", Reason="", readiness=false. Elapsed: 6.779239417s +Jun 18 08:09:44.522: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Running", Reason="", readiness=false. Elapsed: 8.782665759s +Jun 18 08:09:46.526: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Running", Reason="", readiness=false. Elapsed: 10.786145441s +Jun 18 08:09:48.533: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Running", Reason="", readiness=false. 
Elapsed: 12.793421249s +Jun 18 08:09:50.536: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Running", Reason="", readiness=false. Elapsed: 14.796922743s +Jun 18 08:09:52.539: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Running", Reason="", readiness=false. Elapsed: 16.799952097s +Jun 18 08:09:54.542: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Running", Reason="", readiness=false. Elapsed: 18.80300584s +Jun 18 08:09:56.546: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Running", Reason="", readiness=false. Elapsed: 20.806050503s +Jun 18 08:09:58.549: INFO: Pod "pod-subpath-test-configmap-h9fv": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.809227901s +STEP: Saw pod success +Jun 18 08:09:58.549: INFO: Pod "pod-subpath-test-configmap-h9fv" satisfied condition "success or failure" +Jun 18 08:09:58.551: INFO: Trying to get logs from node node5 pod pod-subpath-test-configmap-h9fv container test-container-subpath-configmap-h9fv: +STEP: delete the pod +Jun 18 08:09:58.568: INFO: Waiting for pod pod-subpath-test-configmap-h9fv to disappear +Jun 18 08:09:58.571: INFO: Pod pod-subpath-test-configmap-h9fv no longer exists +STEP: Deleting pod pod-subpath-test-configmap-h9fv +Jun 18 08:09:58.571: INFO: Deleting pod "pod-subpath-test-configmap-h9fv" in namespace "e2e-tests-subpath-8xss4" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:09:58.575: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-subpath-8xss4" for this suite. 
+Jun 18 08:10:06.592: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:10:06.648: INFO: namespace: e2e-tests-subpath-8xss4, resource: bindings, ignored listing per whitelist +Jun 18 08:10:07.544: INFO: namespace e2e-tests-subpath-8xss4 deletion completed in 8.961514281s + +• [SLOW TEST:32.018 seconds] +[sig-storage] Subpath +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with configmap pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSS +------------------------------ +[k8s.io] [sig-node] PreStop + should call prestop when killing a pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] [sig-node] PreStop + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:10:07.544: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename prestop +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-prestop-lxbch +STEP: Waiting for a default service account to be provisioned in namespace +[It] should call prestop when killing a pod [Conformance] + 
/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating server pod server in namespace e2e-tests-prestop-lxbch +STEP: Waiting for pods to come up. +STEP: Creating tester pod tester in namespace e2e-tests-prestop-lxbch +STEP: Deleting pre-stop pod +Jun 18 08:10:23.559: INFO: Saw: { + "Hostname": "server", + "Sent": null, + "Received": { + "prestop": 1 + }, + "Errors": null, + "Log": [ + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up." + ], + "StillContactingPeers": true +} +STEP: Deleting the server pod +[AfterEach] [k8s.io] [sig-node] PreStop + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +Jun 18 08:10:23.568: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-prestop-lxbch" for this suite. 
+Jun 18 08:11:07.594: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 18 08:11:08.567: INFO: namespace: e2e-tests-prestop-lxbch, resource: bindings, ignored listing per whitelist +Jun 18 08:11:09.646: INFO: namespace e2e-tests-prestop-lxbch deletion completed in 46.073943403s + +• [SLOW TEST:62.102 seconds] +[k8s.io] [sig-node] PreStop +/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should call prestop when killing a pod [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSS +------------------------------ +[sig-network] Proxy version v1 + should proxy logs on node with explicit kubelet port using proxy subresource [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] version v1 + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +Jun 18 08:11:09.646: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001 +STEP: Building a namespace api object, basename proxy +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-proxy-t89sv +STEP: Waiting for a default service account to be provisioned in namespace +[It] should proxy logs on node with explicit kubelet port using proxy subresource [Conformance] + /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +Jun 18 08:11:12.510: INFO: (0) /api/v1/nodes/node1:10250/proxy/logs/:
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename daemonsets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-daemonsets-9n6zh
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should rollback without unnecessary restarts [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+Jun 18 08:11:26.552: INFO: Requires at least 2 nodes (not -1)
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+Jun 18 08:11:26.561: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-9n6zh/daemonsets","resourceVersion":"13546360"},"items":null}
+
+Jun 18 08:11:26.563: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-9n6zh/pods","resourceVersion":"13546360"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:11:26.579: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-daemonsets-9n6zh" for this suite.
+Jun 18 08:11:38.598: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:11:40.579: INFO: namespace: e2e-tests-daemonsets-9n6zh, resource: bindings, ignored listing per whitelist
+Jun 18 08:11:40.643: INFO: namespace e2e-tests-daemonsets-9n6zh deletion completed in 14.059719541s
+
+S [SKIPPING] [15.066 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should rollback without unnecessary restarts [Conformance] [It]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+
+  Jun 18 08:11:26.552: Requires at least 2 nodes (not -1)
+
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/util.go:292
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:11:40.643: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-fpn9m
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name projected-configmap-test-volume-map-b43fc292-91a0-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume configMaps
+Jun 18 08:11:42.602: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-b4c9f1e0-91a0-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-fpn9m" to be "success or failure"
+Jun 18 08:11:42.609: INFO: Pod "pod-projected-configmaps-b4c9f1e0-91a0-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 6.918594ms
+Jun 18 08:11:44.720: INFO: Pod "pod-projected-configmaps-b4c9f1e0-91a0-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.11807929s
+Jun 18 08:11:47.546: INFO: Pod "pod-projected-configmaps-b4c9f1e0-91a0-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.943771642s
+STEP: Saw pod success
+Jun 18 08:11:47.546: INFO: Pod "pod-projected-configmaps-b4c9f1e0-91a0-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:11:47.567: INFO: Trying to get logs from node node5 pod pod-projected-configmaps-b4c9f1e0-91a0-11e9-bbf5-0e74dabf3615 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 18 08:11:48.550: INFO: Waiting for pod pod-projected-configmaps-b4c9f1e0-91a0-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:11:48.559: INFO: Pod pod-projected-configmaps-b4c9f1e0-91a0-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:11:48.559: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-fpn9m" for this suite.
+Jun 18 08:11:56.623: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:11:56.724: INFO: namespace: e2e-tests-projected-fpn9m, resource: bindings, ignored listing per whitelist
+Jun 18 08:11:56.934: INFO: namespace e2e-tests-projected-fpn9m deletion completed in 8.349422853s
+
+• [SLOW TEST:16.291 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:11:56.934: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename sched-pred
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-sched-pred-zhl9d
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79
+Jun 18 08:11:57.511: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+Jun 18 08:11:57.518: INFO: Waiting for terminating namespaces to be deleted...
+Jun 18 08:11:57.520: INFO: 
+Logging pods the kubelet thinks is on node node1 before test
+Jun 18 08:11:57.542: INFO: qce-postgres-stolon-sentinel-b6bcb4448-gch5x from qce started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: prometheus-operator-prometheus-node-exporter-jd657 from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container node-exporter ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: qce-etcd-5665b647b-cjlnd from qce started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container qce-etcd-etcd ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: mongorsdata-operator-54b67c6cc5-fh4r4 from qiniu-mongors started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container mongors-operator ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-r97x2 from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: qce-authzhook-deploy-75cbd8bc4b-wd28x from qce started at 2019-05-14 10:16:10 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container qce-authzhook ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: prometheus-prometheus-operator-prometheus-0 from kube-system started at 2019-06-15 09:23:36 +0000 UTC (3 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container prometheus ready: true, restart count 1
+Jun 18 08:11:57.542: INFO: 	Container prometheus-config-reloader ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: 	Container rules-configmap-reloader ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: csirbd-demo-pod from default started at 2019-05-14 08:50:23 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container web-server ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: redisdata-operator-cdd96dd96-mxcw6 from qiniu-redis started at 2019-06-04 11:39:27 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container redis-operator ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: qce-mongo-deploy-65f555f54f-2td5v from qce started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container qce-mongo ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: kube-proxy-4kq5g from kube-system started at 2019-05-14 05:39:01 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container kube-proxy ready: true, restart count 2
+Jun 18 08:11:57.542: INFO: calico-node-87wc8 from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container calico-node ready: true, restart count 2
+Jun 18 08:11:57.542: INFO: 	Container install-cni ready: true, restart count 2
+Jun 18 08:11:57.542: INFO: csi-cephfs-ceph-csi-cephfs-provisioner-0 from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: 	Container csi-provisioner ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: qce-postgres-stolon-keeper-1 from qce started at 2019-05-14 09:40:52 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: alert-dispatcher-58d448f9c9-t5npr from kube-system started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container alert-dispatcher ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-2smn4 from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: logkit-poc-dk8x2 from kube-system started at 2019-05-17 03:17:51 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container logkit-poc ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: alertmanager-prometheus-operator-alertmanager-1 from kube-system started at 2019-06-15 05:36:24 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.542: INFO: 	Container alertmanager ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: 	Container config-reloader ready: true, restart count 0
+Jun 18 08:11:57.542: INFO: 
+Logging pods the kubelet thinks is on node node2 before test
+Jun 18 08:11:57.557: INFO: logkit-poc-cgpj8 from kube-system started at 2019-05-17 03:17:51 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container logkit-poc ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: kube-proxy-hm6bg from kube-system started at 2019-05-14 05:39:31 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: alert-controller-568fb6794d-f9vhm from kube-system started at 2019-06-14 01:20:22 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container alert-controller ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: qce-postgres-stolon-keeper-0 from qce started at 2019-06-14 23:07:51 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: redis-operator-b7597fc6c-fhsq9 from qiniu-redis started at 2019-06-06 05:55:00 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container redis-operator ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: kibana-58f596b5d4-gprzs from kube-system started at 2019-06-09 10:42:30 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container kibana ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: csi-rbd-ceph-csi-rbd-provisioner-0 from default started at 2019-06-15 04:42:56 +0000 UTC (3 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container csi-provisioner ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: 	Container csi-snapshotter ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: prometheus-operator-prometheus-node-exporter-ctlvb from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container node-exporter ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: calico-kube-controllers-5ffbcb76cf-km64s from kube-system started at 2019-06-06 06:34:55 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container calico-kube-controllers ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: qce-clair-6f69f7554d-2hpxb from qce started at 2019-06-08 07:24:41 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container clair ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: csi-cephfs-ceph-csi-cephfs-attacher-0 from default started at 2019-05-14 08:47:42 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container csi-cephfsplugin-attacher ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: rabbitmq-operator-845b85b447-qx5nm from qiniu-rabbitmq started at 2019-06-15 05:32:51 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container rabbitmq-operator ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: calico-node-vfj4h from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container calico-node ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: 	Container install-cni ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-c2hjw from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: alert-apiserver-5f887ff458-dcdcn from kube-system started at 2019-06-13 06:50:57 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container alert-apiserver ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-mncbd from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.557: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 08:11:57.557: INFO: 
+Logging pods the kubelet thinks is on node node3 before test
+Jun 18 08:11:57.569: INFO: prometheus-operator-prometheus-node-exporter-84pmd from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container node-exporter ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: qce-jenkins-0 from qce started at 2019-06-16 18:40:16 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container qce-jenkins ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-gxvpm from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: logkit-poc-znzg2 from kube-system started at 2019-06-18 06:27:20 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container logkit-poc ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: kube-proxy-tc77p from kube-system started at 2019-05-14 05:38:50 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: calico-node-mzvzv from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container calico-node ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: 	Container install-cni ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: mongors-operator-65df599b-wjs4w from qiniu-mongors started at 2019-06-04 11:39:27 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container mongors-operator ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: qce-portal-deploy-6d799f79df-5lsgc from qce started at 2019-06-17 04:26:28 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container qce-portal ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: tiller-deploy-555696dfc8-gvznf from kube-system started at 2019-05-14 08:33:12 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container tiller ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-tnz48 from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: qce-postgres-stolon-sentinel-b6bcb4448-c4nmj from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: qce-postgres-stolon-proxy-78b9bc58d8-pg92h from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: prometheus-operator-prometheus-blackbox-exporter-5d4cbbf54vzmk6 from kube-system started at 2019-05-16 08:39:36 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container blackbox-exporter ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: 	Container configmap-reload ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: prometheus-operator-kube-state-metrics-969f69894-p5bbm from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container kube-state-metrics ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: prometheus-operator-grafana-86b99c77dd-cmbdv from kube-system started at 2019-05-16 08:39:36 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container grafana ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: 	Container grafana-sc-dashboard ready: true, restart count 39
+Jun 18 08:11:57.569: INFO: alert-apiserver-etcd-6d744f7648-llfwf from kube-system started at 2019-06-13 06:49:42 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.569: INFO: 	Container alert-apiserver-etcd ready: true, restart count 0
+Jun 18 08:11:57.569: INFO: 
+Logging pods the kubelet thinks is on node node4 before test
+Jun 18 08:11:57.578: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-q2jtp from default started at 2019-06-16 19:51:06 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.578: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: kirk-apiserver-doc-6b5f8c7dd8-lm2pv from qce started at 2019-06-18 05:42:55 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.578: INFO: 	Container kirk-apiserver-doc ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: logkit-poc-7shgm from kube-system started at 2019-06-16 19:36:14 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.578: INFO: 	Container logkit-poc ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-7cg42 from default started at 2019-06-16 19:50:32 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.578: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: mysqldata-operator-6f447687b6-qdkt8 from qiniu-mysql started at 2019-06-18 03:17:07 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.578: INFO: 	Container mysql-operator ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: prometheus-operator-prometheus-node-exporter-f2zgm from kube-system started at 2019-06-16 19:39:12 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.578: INFO: 	Container node-exporter ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: kube-proxy-2vsgc from kube-system started at 2019-06-16 19:50:32 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.578: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: mysql-operator-v2-645fcc7f6c-l9dtm from qiniu-mysql started at 2019-06-18 03:19:36 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.578: INFO: 	Container mysql-operator ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: elasticsearch-c5cc84d5f-ctdmq from kube-system started at 2019-06-18 06:26:40 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.578: INFO: 	Container elasticsearch ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: 	Container es-rotate ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: calico-node-fhsvk from kube-system started at 2019-06-16 19:53:03 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.578: INFO: 	Container calico-node ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: 	Container install-cni ready: true, restart count 0
+Jun 18 08:11:57.578: INFO: 
+Logging pods the kubelet thinks is on node node5 before test
+Jun 18 08:11:57.587: INFO: calico-node-fmzrt from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container calico-node ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: 	Container install-cni ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-jfmbb from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: qce-postgres-stolon-proxy-78b9bc58d8-8pp2x from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: onetimeurl-controller-745fc87d5d-g58jg from qce started at 2019-05-14 10:16:10 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container onetimeurl-controller ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: logkit-poc-5z5cm from kube-system started at 2019-05-17 03:17:51 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container logkit-poc ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-42fl8 from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: csi-rbd-ceph-csi-rbd-attacher-0 from default started at 2019-05-14 08:47:33 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container csi-rbdplugin-attacher ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-18 07:13:06 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container kube-sonobuoy ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: alertmanager-prometheus-operator-alertmanager-0 from kube-system started at 2019-05-16 08:39:44 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container alertmanager ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: 	Container config-reloader ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: sonobuoy-e2e-job-2b96015867f64622 from heptio-sonobuoy started at 2019-06-18 07:13:12 +0000 UTC (2 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container e2e ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: qce-user-manual-deploy-867778f667-dcl87 from qce started at 2019-05-27 12:26:46 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container qce-user-manual ready: true, restart count 0
+Jun 18 08:11:57.587: INFO: prometheus-operator-prometheus-node-exporter-9g6lb from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.587: INFO: 	Container node-exporter ready: true, restart count 0
+Jun 18 08:11:57.588: INFO: prometheus-prometheus-operator-prometheus-1 from kube-system started at 2019-06-13 11:42:12 +0000 UTC (3 container statuses recorded)
+Jun 18 08:11:57.588: INFO: 	Container prometheus ready: true, restart count 0
+Jun 18 08:11:57.588: INFO: 	Container prometheus-config-reloader ready: true, restart count 0
+Jun 18 08:11:57.588: INFO: 	Container rules-configmap-reloader ready: true, restart count 0
+Jun 18 08:11:57.588: INFO: alert-dispatcher-58d448f9c9-4mxgj from kube-system started at 2019-06-15 12:19:08 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.588: INFO: 	Container alert-dispatcher ready: true, restart count 0
+Jun 18 08:11:57.588: INFO: kube-proxy-lqpj7 from kube-system started at 2019-05-14 05:38:48 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.588: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 18 08:11:57.588: INFO: qce-postgres-stolon-sentinel-b6bcb4448-jbrkl from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.588: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 08:11:57.588: INFO: prometheus-operator-operator-654b9d4648-lflhd from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 08:11:57.588: INFO: 	Container prometheus-operator ready: true, restart count 0
+[It] validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: verifying the node has the label node node1
+STEP: verifying the node has the label node node2
+STEP: verifying the node has the label node node3
+STEP: verifying the node has the label node node4
+STEP: verifying the node has the label node node5
+Jun 18 08:11:57.652: INFO: Pod csi-cephfs-ceph-csi-cephfs-attacher-0 requesting resource cpu=0m on Node node2
+Jun 18 08:11:57.652: INFO: Pod csi-cephfs-ceph-csi-cephfs-nodeplugin-2smn4 requesting resource cpu=0m on Node node1
+Jun 18 08:11:57.652: INFO: Pod csi-cephfs-ceph-csi-cephfs-nodeplugin-7cg42 requesting resource cpu=0m on Node node4
+Jun 18 08:11:57.652: INFO: Pod csi-cephfs-ceph-csi-cephfs-nodeplugin-c2hjw requesting resource cpu=0m on Node node2
+Jun 18 08:11:57.652: INFO: Pod csi-cephfs-ceph-csi-cephfs-nodeplugin-jfmbb requesting resource cpu=0m on Node node5
+Jun 18 08:11:57.652: INFO: Pod csi-cephfs-ceph-csi-cephfs-nodeplugin-tnz48 requesting resource cpu=0m on Node node3
+Jun 18 08:11:57.652: INFO: Pod csi-cephfs-ceph-csi-cephfs-provisioner-0 requesting resource cpu=0m on Node node1
+Jun 18 08:11:57.652: INFO: Pod csi-rbd-ceph-csi-rbd-attacher-0 requesting resource cpu=0m on Node node5
+Jun 18 08:11:57.652: INFO: Pod csi-rbd-ceph-csi-rbd-nodeplugin-42fl8 requesting resource cpu=0m on Node node5
+Jun 18 08:11:57.652: INFO: Pod csi-rbd-ceph-csi-rbd-nodeplugin-gxvpm requesting resource cpu=0m on Node node3
+Jun 18 08:11:57.652: INFO: Pod csi-rbd-ceph-csi-rbd-nodeplugin-mncbd requesting resource cpu=0m on Node node2
+Jun 18 08:11:57.652: INFO: Pod csi-rbd-ceph-csi-rbd-nodeplugin-q2jtp requesting resource cpu=0m on Node node4
+Jun 18 08:11:57.652: INFO: Pod csi-rbd-ceph-csi-rbd-nodeplugin-r97x2 requesting resource cpu=0m on Node node1
+Jun 18 08:11:57.652: INFO: Pod csi-rbd-ceph-csi-rbd-provisioner-0 requesting resource cpu=0m on Node node2
+Jun 18 08:11:57.652: INFO: Pod csirbd-demo-pod requesting resource cpu=0m on Node node1
+Jun 18 08:11:57.652: INFO: Pod sonobuoy requesting resource cpu=0m on Node node5
+Jun 18 08:11:57.652: INFO: Pod sonobuoy-e2e-job-2b96015867f64622 requesting resource cpu=0m on Node node5
+Jun 18 08:11:57.652: INFO: Pod alert-apiserver-5f887ff458-dcdcn requesting resource cpu=0m on Node node2
+Jun 18 08:11:57.652: INFO: Pod alert-apiserver-etcd-6d744f7648-llfwf requesting resource cpu=0m on Node node3
+Jun 18 08:11:57.652: INFO: Pod alert-controller-568fb6794d-f9vhm requesting resource cpu=200m on Node node2
+Jun 18 08:11:57.652: INFO: Pod alert-dispatcher-58d448f9c9-4mxgj requesting resource cpu=200m on Node node5
+Jun 18 08:11:57.652: INFO: Pod alert-dispatcher-58d448f9c9-t5npr requesting resource cpu=200m on Node node1
+Jun 18 08:11:57.652: INFO: Pod alertmanager-prometheus-operator-alertmanager-0 requesting resource cpu=5m on Node node5
+Jun 18 08:11:57.652: INFO: Pod alertmanager-prometheus-operator-alertmanager-1 requesting resource cpu=5m on Node node1
+Jun 18 08:11:57.652: INFO: Pod calico-kube-controllers-5ffbcb76cf-km64s requesting resource cpu=0m on Node node2
+Jun 18 08:11:57.652: INFO: Pod calico-node-87wc8 requesting resource cpu=250m on Node node1
+Jun 18 08:11:57.652: INFO: Pod calico-node-fhsvk requesting resource cpu=250m on Node node4
+Jun 18 08:11:57.652: INFO: Pod calico-node-fmzrt requesting resource cpu=250m on Node node5
+Jun 18 08:11:57.652: INFO: Pod calico-node-mzvzv requesting resource cpu=250m on Node node3
+Jun 18 08:11:57.652: INFO: Pod calico-node-vfj4h requesting resource cpu=250m on Node node2
+Jun 18 08:11:57.652: INFO: Pod elasticsearch-c5cc84d5f-ctdmq requesting resource cpu=2000m on Node node4
+Jun 18 08:11:57.652: INFO: Pod kibana-58f596b5d4-gprzs requesting resource cpu=100m on Node node2
+Jun 18 08:11:57.652: INFO: Pod kube-proxy-2vsgc requesting resource cpu=0m on Node node4
+Jun 18 08:11:57.652: INFO: Pod kube-proxy-4kq5g requesting resource cpu=0m on Node node1
+Jun 18 08:11:57.652: INFO: Pod kube-proxy-hm6bg requesting resource cpu=0m on Node node2
+Jun 18 08:11:57.652: INFO: Pod kube-proxy-lqpj7 requesting resource cpu=0m on Node node5
+Jun 18 08:11:57.652: INFO: Pod kube-proxy-tc77p requesting resource cpu=0m on Node node3
+Jun 18 08:11:57.652: INFO: Pod logkit-poc-5z5cm requesting resource cpu=512m on Node node5
+Jun 18 08:11:57.652: INFO: Pod logkit-poc-7shgm requesting resource cpu=512m on Node node4
+Jun 18 08:11:57.652: INFO: Pod logkit-poc-cgpj8 requesting resource cpu=512m on Node node2
+Jun 18 08:11:57.652: INFO: Pod logkit-poc-dk8x2 requesting resource cpu=512m on Node node1
+Jun 18 08:11:57.652: INFO: Pod logkit-poc-znzg2 requesting resource cpu=512m on Node node3
+Jun 18 08:11:57.652: INFO: Pod prometheus-operator-grafana-86b99c77dd-cmbdv requesting resource cpu=1050m on Node node3
+Jun 18 08:11:57.652: INFO: Pod prometheus-operator-kube-state-metrics-969f69894-p5bbm requesting resource cpu=100m on Node node3
+Jun 18 08:11:57.652: INFO: Pod prometheus-operator-operator-654b9d4648-lflhd requesting resource cpu=100m on Node node5
+Jun 18 08:11:57.652: INFO: Pod prometheus-operator-prometheus-blackbox-exporter-5d4cbbf54vzmk6 requesting resource cpu=200m on Node node3
+Jun 18 08:11:57.652: INFO: Pod prometheus-operator-prometheus-node-exporter-84pmd requesting resource cpu=100m on Node node3
+Jun 18 08:11:57.652: INFO: Pod prometheus-operator-prometheus-node-exporter-9g6lb requesting resource cpu=100m on Node node5
+Jun 18 08:11:57.652: INFO: Pod prometheus-operator-prometheus-node-exporter-ctlvb requesting resource cpu=100m on Node node2
+Jun 18 08:11:57.652: INFO: Pod prometheus-operator-prometheus-node-exporter-f2zgm requesting resource cpu=100m on Node node4
+Jun 18 08:11:57.652: INFO: Pod prometheus-operator-prometheus-node-exporter-jd657 requesting resource cpu=100m on Node node1
+Jun 18 08:11:57.652: INFO: Pod prometheus-prometheus-operator-prometheus-0 requesting resource cpu=20m on Node node1
+Jun 18 08:11:57.652: INFO: Pod prometheus-prometheus-operator-prometheus-1 requesting resource cpu=20m on Node node5
+Jun 18 08:11:57.652: INFO: Pod tiller-deploy-555696dfc8-gvznf requesting resource cpu=0m on Node node3
+Jun 18 08:11:57.652: INFO: Pod kirk-apiserver-doc-6b5f8c7dd8-lm2pv requesting resource cpu=0m on Node node4
+Jun 18 08:11:57.652: INFO: Pod onetimeurl-controller-745fc87d5d-g58jg requesting resource cpu=0m on Node node5
+Jun 18 08:11:57.652: INFO: Pod qce-authzhook-deploy-75cbd8bc4b-wd28x requesting resource cpu=500m on Node node1
+Jun 18 08:11:57.652: INFO: Pod qce-clair-6f69f7554d-2hpxb requesting resource cpu=100m on Node node2
+Jun 18 08:11:57.652: INFO: Pod qce-etcd-5665b647b-cjlnd requesting resource cpu=0m on Node node1
+Jun 18 08:11:57.652: INFO: Pod qce-jenkins-0 requesting resource cpu=0m on Node node3
+Jun 18 08:11:57.652: INFO: Pod qce-mongo-deploy-65f555f54f-2td5v requesting resource cpu=0m on Node node1
+Jun 18 08:11:57.652: INFO: Pod qce-portal-deploy-6d799f79df-5lsgc requesting resource cpu=0m on Node node3
+Jun 18 08:11:57.652: INFO: Pod qce-postgres-stolon-keeper-0 requesting resource cpu=0m on Node node2
+Jun 18 08:11:57.652: INFO: Pod qce-postgres-stolon-keeper-1 requesting resource cpu=0m on Node node1
+Jun 18 08:11:57.652: INFO: Pod qce-postgres-stolon-proxy-78b9bc58d8-8pp2x requesting resource cpu=0m on Node node5
+Jun 18 08:11:57.652: INFO: Pod qce-postgres-stolon-proxy-78b9bc58d8-pg92h requesting resource cpu=0m on Node node3
+Jun 18 08:11:57.652: INFO: Pod qce-postgres-stolon-sentinel-b6bcb4448-c4nmj requesting resource cpu=0m on Node node3
+Jun 18 08:11:57.652: INFO: Pod qce-postgres-stolon-sentinel-b6bcb4448-gch5x requesting resource cpu=0m on Node node1
+Jun 18 08:11:57.652: INFO: Pod qce-postgres-stolon-sentinel-b6bcb4448-jbrkl requesting resource cpu=0m on Node node5
+Jun 18 08:11:57.652: INFO: Pod qce-user-manual-deploy-867778f667-dcl87 requesting resource cpu=0m on Node node5
+Jun 18 08:11:57.652: INFO: Pod mongors-operator-65df599b-wjs4w requesting resource cpu=300m on Node node3
+Jun 18 08:11:57.652: INFO: Pod mongorsdata-operator-54b67c6cc5-fh4r4 requesting resource cpu=300m on Node node1
+Jun 18 08:11:57.652: INFO: Pod mysql-operator-v2-645fcc7f6c-l9dtm requesting resource cpu=300m on Node node4
+Jun 18 08:11:57.652: INFO: Pod mysqldata-operator-6f447687b6-qdkt8 requesting resource cpu=300m on Node node4
+Jun 18 08:11:57.652: INFO: Pod rabbitmq-operator-845b85b447-qx5nm requesting resource cpu=300m on Node node2
+Jun 18 08:11:57.652: INFO: Pod redis-operator-b7597fc6c-fhsq9 requesting resource cpu=300m on Node node2
+Jun 18 08:11:57.652: INFO: Pod redisdata-operator-cdd96dd96-mxcw6 requesting resource cpu=300m on Node node1
+STEP: Starting Pods to consume most of the cluster CPU.
+STEP: Creating another pod that requires unavailable amount of CPU.
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcb6021-91a0-11e9-bbf5-0e74dabf3615.15a93cba8548ee21], Reason = [Scheduled], Message = [Successfully assigned e2e-tests-sched-pred-zhl9d/filler-pod-bdcb6021-91a0-11e9-bbf5-0e74dabf3615 to node1]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcb6021-91a0-11e9-bbf5-0e74dabf3615.15a93cbac1673828], Reason = [Pulling], Message = [pulling image "reg.kpaas.io/pause:3.1"]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcb6021-91a0-11e9-bbf5-0e74dabf3615.15a93cbaffc7758e], Reason = [Pulled], Message = [Successfully pulled image "reg.kpaas.io/pause:3.1"]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcb6021-91a0-11e9-bbf5-0e74dabf3615.15a93cbb00cff87f], Reason = [Created], Message = [Created container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcb6021-91a0-11e9-bbf5-0e74dabf3615.15a93cbb05753027], Reason = [Started], Message = [Started container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcc229d-91a0-11e9-bbf5-0e74dabf3615.15a93cba85c2a670], Reason = [Scheduled], Message = [Successfully assigned e2e-tests-sched-pred-zhl9d/filler-pod-bdcc229d-91a0-11e9-bbf5-0e74dabf3615 to node2]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcc229d-91a0-11e9-bbf5-0e74dabf3615.15a93cbac2206d11], Reason = [Pulling], Message = [pulling image "reg.kpaas.io/pause:3.1"]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcc229d-91a0-11e9-bbf5-0e74dabf3615.15a93cbb061929d3], Reason = [Pulled], Message = [Successfully pulled image "reg.kpaas.io/pause:3.1"]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcc229d-91a0-11e9-bbf5-0e74dabf3615.15a93cbb075f6f09], Reason = [Created], Message = [Created container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcc229d-91a0-11e9-bbf5-0e74dabf3615.15a93cbb0b35e7f5], Reason = [Started], Message = [Started container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcc9808-91a0-11e9-bbf5-0e74dabf3615.15a93cba85c178d4], Reason = [Scheduled], Message = [Successfully assigned e2e-tests-sched-pred-zhl9d/filler-pod-bdcc9808-91a0-11e9-bbf5-0e74dabf3615 to node3]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcc9808-91a0-11e9-bbf5-0e74dabf3615.15a93cbabf630f53], Reason = [Pulling], Message = [pulling image "reg.kpaas.io/pause:3.1"]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcc9808-91a0-11e9-bbf5-0e74dabf3615.15a93cbb796c6856], Reason = [Pulled], Message = [Successfully pulled image "reg.kpaas.io/pause:3.1"]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcc9808-91a0-11e9-bbf5-0e74dabf3615.15a93cbb7aa02bcd], Reason = [Created], Message = [Created container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcc9808-91a0-11e9-bbf5-0e74dabf3615.15a93cbb7f57ca3a], Reason = [Started], Message = [Started container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcd0b57-91a0-11e9-bbf5-0e74dabf3615.15a93cba85e49833], Reason = [Scheduled], Message = [Successfully assigned e2e-tests-sched-pred-zhl9d/filler-pod-bdcd0b57-91a0-11e9-bbf5-0e74dabf3615 to node4]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcd0b57-91a0-11e9-bbf5-0e74dabf3615.15a93cbabfa9f19e], Reason = [Pulling], Message = [pulling image "reg.kpaas.io/pause:3.1"]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcd0b57-91a0-11e9-bbf5-0e74dabf3615.15a93cbb743b4ac0], Reason = [Pulled], Message = [Successfully pulled image "reg.kpaas.io/pause:3.1"]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcd0b57-91a0-11e9-bbf5-0e74dabf3615.15a93cbb756d3060], Reason = [Created], Message = [Created container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcd0b57-91a0-11e9-bbf5-0e74dabf3615.15a93cbb79e8e93d], Reason = [Started], Message = [Started container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcdde02-91a0-11e9-bbf5-0e74dabf3615.15a93cba86536680], Reason = [Scheduled], Message = [Successfully assigned e2e-tests-sched-pred-zhl9d/filler-pod-bdcdde02-91a0-11e9-bbf5-0e74dabf3615 to node5]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcdde02-91a0-11e9-bbf5-0e74dabf3615.15a93cbabfbd7f10], Reason = [Pulled], Message = [Container image "reg.kpaas.io/pause:3.1" already present on machine]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcdde02-91a0-11e9-bbf5-0e74dabf3615.15a93cbac1966bfb], Reason = [Created], Message = [Created container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-bdcdde02-91a0-11e9-bbf5-0e74dabf3615.15a93cbac603fa38], Reason = [Started], Message = [Started container]
+STEP: Considering event: 
+Type = [Warning], Name = [additional-pod.15a93cbbedab88b4], Reason = [FailedScheduling], Message = [0/6 nodes are available: 1 node(s) had taints that the pod didn't tolerate, 5 Insufficient cpu.]
+STEP: removing the label node off the node node3
+STEP: verifying the node doesn't have the label node
+STEP: removing the label node off the node node4
+STEP: verifying the node doesn't have the label node
+STEP: removing the label node off the node node5
+STEP: verifying the node doesn't have the label node
+STEP: removing the label node off the node node1
+STEP: verifying the node doesn't have the label node
+STEP: removing the label node off the node node2
+STEP: verifying the node doesn't have the label node
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:12:05.650: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-sched-pred-zhl9d" for this suite.
+Jun 18 08:12:13.674: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:12:14.662: INFO: namespace: e2e-tests-sched-pred-zhl9d, resource: bindings, ignored listing per whitelist
+Jun 18 08:12:14.670: INFO: namespace e2e-tests-sched-pred-zhl9d deletion completed in 9.015770992s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70
+
+• [SLOW TEST:17.736 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[k8s.io] KubeletManagedEtcHosts 
+  should test kubelet managed /etc/hosts file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] KubeletManagedEtcHosts
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:12:14.670: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-e2e-kubelet-etc-hosts-tlk8c
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should test kubelet managed /etc/hosts file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Setting up the test
+STEP: Creating hostNetwork=false pod
+STEP: Creating hostNetwork=true pod
+STEP: Running the test
+STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false
+Jun 18 08:12:19.553: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-tlk8c PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:12:19.553: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:12:19.654: INFO: Exec stderr: ""
+Jun 18 08:12:19.654: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-tlk8c PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:12:19.654: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:12:20.511: INFO: Exec stderr: ""
+Jun 18 08:12:20.511: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-tlk8c PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:12:20.511: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:12:20.590: INFO: Exec stderr: ""
+Jun 18 08:12:20.590: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-tlk8c PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:12:20.590: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:12:21.509: INFO: Exec stderr: ""
+STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount
+Jun 18 08:12:21.509: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-tlk8c PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:12:21.509: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:12:21.591: INFO: Exec stderr: ""
+Jun 18 08:12:21.591: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-tlk8c PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:12:21.591: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:12:21.665: INFO: Exec stderr: ""
+STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true
+Jun 18 08:12:21.665: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-tlk8c PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:12:21.665: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:12:21.748: INFO: Exec stderr: ""
+Jun 18 08:12:21.748: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-tlk8c PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:12:21.748: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:12:21.819: INFO: Exec stderr: ""
+Jun 18 08:12:21.820: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-tlk8c PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:12:21.820: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:12:22.511: INFO: Exec stderr: ""
+Jun 18 08:12:22.511: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-tlk8c PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:12:22.511: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:12:22.592: INFO: Exec stderr: ""
+[AfterEach] [k8s.io] KubeletManagedEtcHosts
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:12:22.592: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-e2e-kubelet-etc-hosts-tlk8c" for this suite.
+Jun 18 08:13:12.607: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:13:12.730: INFO: namespace: e2e-tests-e2e-kubelet-etc-hosts-tlk8c, resource: bindings, ignored listing per whitelist
+Jun 18 08:13:12.918: INFO: namespace e2e-tests-e2e-kubelet-etc-hosts-tlk8c deletion completed in 50.321948327s
+
+• [SLOW TEST:58.248 seconds]
+[k8s.io] KubeletManagedEtcHosts
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should test kubelet managed /etc/hosts file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:13:12.918: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-8dfpd
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the deployment
+STEP: Wait for the Deployment to create new ReplicaSet
+STEP: delete the deployment
+STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the rs
+STEP: Gathering metrics
+W0618 08:13:44.557667      16 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun 18 08:13:44.557: INFO: For apiserver_request_count:
+For apiserver_request_latencies_summary:
+For etcd_helper_cache_entry_count:
+For etcd_helper_cache_hit_count:
+For etcd_helper_cache_miss_count:
+For etcd_request_cache_add_latencies_summary:
+For etcd_request_cache_get_latencies_summary:
+For etcd_request_latencies_summary:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:13:44.557: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-8dfpd" for this suite.
+Jun 18 08:13:52.577: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:13:52.623: INFO: namespace: e2e-tests-gc-8dfpd, resource: bindings, ignored listing per whitelist
+Jun 18 08:13:53.518: INFO: namespace e2e-tests-gc-8dfpd deletion completed in 8.9576611s
+
+• [SLOW TEST:40.600 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:13:53.519: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-5gb2j
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-upd-02f7354c-91a1-11e9-bbf5-0e74dabf3615
+STEP: Creating the pod
+STEP: Updating configmap configmap-test-upd-02f7354c-91a1-11e9-bbf5-0e74dabf3615
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:15:16.819: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-5gb2j" for this suite.
+Jun 18 08:15:42.833: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:15:43.664: INFO: namespace: e2e-tests-configmap-5gb2j, resource: bindings, ignored listing per whitelist
+Jun 18 08:15:45.589: INFO: namespace e2e-tests-configmap-5gb2j deletion completed in 28.766089894s
+
+• [SLOW TEST:112.071 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] EmptyDir wrapper volumes 
+  should not cause race condition when used for configmaps [Serial] [Slow] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:15:45.590: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename emptydir-wrapper
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-wrapper-k9q29
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not cause race condition when used for configmaps [Serial] [Slow] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating 50 configmaps
+STEP: Creating RC which spawns configmap-volume pods
+Jun 18 08:15:47.601: INFO: Pod name wrapped-volume-race-46d38815-91a1-11e9-bbf5-0e74dabf3615: Found 0 pods out of 5
+Jun 18 08:15:52.606: INFO: Pod name wrapped-volume-race-46d38815-91a1-11e9-bbf5-0e74dabf3615: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-46d38815-91a1-11e9-bbf5-0e74dabf3615 in namespace e2e-tests-emptydir-wrapper-k9q29, will wait for the garbage collector to delete the pods
+Jun 18 08:17:48.694: INFO: Deleting ReplicationController wrapped-volume-race-46d38815-91a1-11e9-bbf5-0e74dabf3615 took: 7.098196ms
+Jun 18 08:17:48.795: INFO: Terminating ReplicationController wrapped-volume-race-46d38815-91a1-11e9-bbf5-0e74dabf3615 pods took: 100.254057ms
+STEP: Creating RC which spawns configmap-volume pods
+Jun 18 08:18:28.907: INFO: Pod name wrapped-volume-race-a6fe3c80-91a1-11e9-bbf5-0e74dabf3615: Found 0 pods out of 5
+Jun 18 08:18:34.525: INFO: Pod name wrapped-volume-race-a6fe3c80-91a1-11e9-bbf5-0e74dabf3615: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-a6fe3c80-91a1-11e9-bbf5-0e74dabf3615 in namespace e2e-tests-emptydir-wrapper-k9q29, will wait for the garbage collector to delete the pods
+Jun 18 08:21:04.643: INFO: Deleting ReplicationController wrapped-volume-race-a6fe3c80-91a1-11e9-bbf5-0e74dabf3615 took: 6.412831ms
+Jun 18 08:21:05.649: INFO: Terminating ReplicationController wrapped-volume-race-a6fe3c80-91a1-11e9-bbf5-0e74dabf3615 pods took: 1.005807481s
+STEP: Creating RC which spawns configmap-volume pods
+Jun 18 08:21:52.863: INFO: Pod name wrapped-volume-race-208f3d2e-91a2-11e9-bbf5-0e74dabf3615: Found 0 pods out of 5
+Jun 18 08:21:57.868: INFO: Pod name wrapped-volume-race-208f3d2e-91a2-11e9-bbf5-0e74dabf3615: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-208f3d2e-91a2-11e9-bbf5-0e74dabf3615 in namespace e2e-tests-emptydir-wrapper-k9q29, will wait for the garbage collector to delete the pods
+Jun 18 08:24:11.967: INFO: Deleting ReplicationController wrapped-volume-race-208f3d2e-91a2-11e9-bbf5-0e74dabf3615 took: 5.100789ms
+Jun 18 08:24:12.567: INFO: Terminating ReplicationController wrapped-volume-race-208f3d2e-91a2-11e9-bbf5-0e74dabf3615 pods took: 600.298722ms
+STEP: Cleaning up the configMaps
+[AfterEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:25:11.635: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-wrapper-k9q29" for this suite.
+Jun 18 08:25:21.656: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:25:21.800: INFO: namespace: e2e-tests-emptydir-wrapper-k9q29, resource: bindings, ignored listing per whitelist
+Jun 18 08:25:21.980: INFO: namespace e2e-tests-emptydir-wrapper-k9q29 deletion completed in 10.339565097s
+
+• [SLOW TEST:576.391 seconds]
+[sig-storage] EmptyDir wrapper volumes
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  should not cause race condition when used for configmaps [Serial] [Slow] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:25:21.980: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-7hmlg
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+Jun 18 08:25:22.663: INFO: Waiting up to 5m0s for pod "downwardapi-volume-9d9d6290-91a2-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-7hmlg" to be "success or failure"
+Jun 18 08:25:22.665: INFO: Pod "downwardapi-volume-9d9d6290-91a2-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.396111ms
+Jun 18 08:25:24.670: INFO: Pod "downwardapi-volume-9d9d6290-91a2-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007477968s
+Jun 18 08:25:26.675: INFO: Pod "downwardapi-volume-9d9d6290-91a2-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012255303s
+STEP: Saw pod success
+Jun 18 08:25:26.675: INFO: Pod "downwardapi-volume-9d9d6290-91a2-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:25:26.678: INFO: Trying to get logs from node node5 pod downwardapi-volume-9d9d6290-91a2-11e9-bbf5-0e74dabf3615 container client-container: 
+STEP: delete the pod
+Jun 18 08:25:26.694: INFO: Waiting for pod downwardapi-volume-9d9d6290-91a2-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:25:26.695: INFO: Pod downwardapi-volume-9d9d6290-91a2-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:25:26.695: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-7hmlg" for this suite.
+Jun 18 08:25:35.537: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:25:35.569: INFO: namespace: e2e-tests-downward-api-7hmlg, resource: bindings, ignored listing per whitelist
+Jun 18 08:25:36.524: INFO: namespace e2e-tests-downward-api-7hmlg deletion completed in 9.825513427s
+
+• [SLOW TEST:14.543 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:25:36.524: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-rmhqp
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating projection with secret that has name projected-secret-test-a60116f3-91a2-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume secrets
+Jun 18 08:25:36.741: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-a6018609-91a2-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-rmhqp" to be "success or failure"
+Jun 18 08:25:36.744: INFO: Pod "pod-projected-secrets-a6018609-91a2-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.806115ms
+Jun 18 08:25:38.746: INFO: Pod "pod-projected-secrets-a6018609-91a2-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.005090453s
+Jun 18 08:25:40.749: INFO: Pod "pod-projected-secrets-a6018609-91a2-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.007667552s
+STEP: Saw pod success
+Jun 18 08:25:40.749: INFO: Pod "pod-projected-secrets-a6018609-91a2-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:25:40.751: INFO: Trying to get logs from node node5 pod pod-projected-secrets-a6018609-91a2-11e9-bbf5-0e74dabf3615 container projected-secret-volume-test: 
+STEP: delete the pod
+Jun 18 08:25:40.771: INFO: Waiting for pod pod-projected-secrets-a6018609-91a2-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:25:40.777: INFO: Pod pod-projected-secrets-a6018609-91a2-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:25:40.777: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-rmhqp" for this suite.
+Jun 18 08:25:51.530: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:25:51.569: INFO: namespace: e2e-tests-projected-rmhqp, resource: bindings, ignored listing per whitelist
+Jun 18 08:25:52.516: INFO: namespace e2e-tests-projected-rmhqp deletion completed in 11.733463413s
+
+• [SLOW TEST:15.992 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[k8s.io] Pods 
+  should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:25:52.516: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-zfc64
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132
+[It] should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+Jun 18 08:25:53.510: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:25:59.508: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pods-zfc64" for this suite.
+Jun 18 08:26:43.587: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:26:44.560: INFO: namespace: e2e-tests-pods-zfc64, resource: bindings, ignored listing per whitelist
+Jun 18 08:26:44.691: INFO: namespace e2e-tests-pods-zfc64 deletion completed in 45.163851506s
+
+• [SLOW TEST:52.175 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:26:44.691: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-5tp54
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48
+[It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod liveness-http in namespace e2e-tests-container-probe-5tp54
+Jun 18 08:26:49.694: INFO: Started pod liveness-http in namespace e2e-tests-container-probe-5tp54
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun 18 08:26:49.698: INFO: Initial restart count of pod liveness-http is 0
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:30:51.559: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-probe-5tp54" for this suite.
+Jun 18 08:30:59.581: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:30:59.631: INFO: namespace: e2e-tests-container-probe-5tp54, resource: bindings, ignored listing per whitelist
+Jun 18 08:31:00.575: INFO: namespace e2e-tests-container-probe-5tp54 deletion completed in 9.007439236s
+
+• [SLOW TEST:255.884 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:31:00.575: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-q2tlf
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating projection with secret that has name projected-secret-test-map-6844418b-91a3-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume secrets
+Jun 18 08:31:02.671: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-68468b7d-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-q2tlf" to be "success or failure"
+Jun 18 08:31:02.679: INFO: Pod "pod-projected-secrets-68468b7d-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 8.152342ms
+Jun 18 08:31:04.682: INFO: Pod "pod-projected-secrets-68468b7d-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011386029s
+Jun 18 08:31:07.594: INFO: Pod "pod-projected-secrets-68468b7d-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.923076645s
+STEP: Saw pod success
+Jun 18 08:31:07.594: INFO: Pod "pod-projected-secrets-68468b7d-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:31:07.603: INFO: Trying to get logs from node node5 pod pod-projected-secrets-68468b7d-91a3-11e9-bbf5-0e74dabf3615 container projected-secret-volume-test: 
+STEP: delete the pod
+Jun 18 08:31:07.626: INFO: Waiting for pod pod-projected-secrets-68468b7d-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:31:07.628: INFO: Pod pod-projected-secrets-68468b7d-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:31:07.628: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-q2tlf" for this suite.
+Jun 18 08:31:17.648: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:31:19.612: INFO: namespace: e2e-tests-projected-q2tlf, resource: bindings, ignored listing per whitelist
+Jun 18 08:31:20.595: INFO: namespace e2e-tests-projected-q2tlf deletion completed in 12.959895093s
+
+• [SLOW TEST:20.020 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:31:20.595: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-n895h
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+Jun 18 08:31:21.644: INFO: Waiting up to 5m0s for pod "downwardapi-volume-73959401-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-n895h" to be "success or failure"
+Jun 18 08:31:21.652: INFO: Pod "downwardapi-volume-73959401-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 8.363958ms
+Jun 18 08:31:24.567: INFO: Pod "downwardapi-volume-73959401-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.923456008s
+Jun 18 08:31:26.572: INFO: Pod "downwardapi-volume-73959401-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.928525539s
+STEP: Saw pod success
+Jun 18 08:31:26.572: INFO: Pod "downwardapi-volume-73959401-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:31:26.576: INFO: Trying to get logs from node node5 pod downwardapi-volume-73959401-91a3-11e9-bbf5-0e74dabf3615 container client-container: 
+STEP: delete the pod
+Jun 18 08:31:26.598: INFO: Waiting for pod downwardapi-volume-73959401-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:31:26.600: INFO: Pod downwardapi-volume-73959401-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:31:26.600: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-n895h" for this suite.
+Jun 18 08:31:36.614: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:31:37.519: INFO: namespace: e2e-tests-projected-n895h, resource: bindings, ignored listing per whitelist
+Jun 18 08:31:37.543: INFO: namespace e2e-tests-projected-n895h deletion completed in 10.938451159s
+
+• [SLOW TEST:16.948 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:31:37.543: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-cpdkm
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-7e525e31-91a3-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume secrets
+Jun 18 08:31:40.532: INFO: Waiting up to 5m0s for pod "pod-secrets-7e52ebfd-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-secrets-cpdkm" to be "success or failure"
+Jun 18 08:31:40.539: INFO: Pod "pod-secrets-7e52ebfd-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 6.89804ms
+Jun 18 08:31:42.542: INFO: Pod "pod-secrets-7e52ebfd-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009706389s
+Jun 18 08:31:44.553: INFO: Pod "pod-secrets-7e52ebfd-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020223526s
+STEP: Saw pod success
+Jun 18 08:31:44.553: INFO: Pod "pod-secrets-7e52ebfd-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:31:44.557: INFO: Trying to get logs from node node5 pod pod-secrets-7e52ebfd-91a3-11e9-bbf5-0e74dabf3615 container secret-volume-test: 
+STEP: delete the pod
+Jun 18 08:31:44.584: INFO: Waiting for pod pod-secrets-7e52ebfd-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:31:44.589: INFO: Pod pod-secrets-7e52ebfd-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:31:44.589: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-cpdkm" for this suite.
+Jun 18 08:31:52.613: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:31:52.743: INFO: namespace: e2e-tests-secrets-cpdkm, resource: bindings, ignored listing per whitelist
+Jun 18 08:31:52.926: INFO: namespace e2e-tests-secrets-cpdkm deletion completed in 8.326026538s
+
+• [SLOW TEST:15.383 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-network] Services 
+  should provide secure master service  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:31:52.926: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename services
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-services-vrx2g
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:85
+[It] should provide secure master service  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:31:53.545: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-services-vrx2g" for this suite.
+Jun 18 08:32:01.572: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:32:01.654: INFO: namespace: e2e-tests-services-vrx2g, resource: bindings, ignored listing per whitelist
+Jun 18 08:32:01.883: INFO: namespace e2e-tests-services-vrx2g deletion completed in 8.333031183s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:90
+
+• [SLOW TEST:8.957 seconds]
+[sig-network] Services
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should provide secure master service  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:32:01.883: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-tsbg6
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0644 on node default medium
+Jun 18 08:32:02.722: INFO: Waiting up to 5m0s for pod "pod-8c08f4c0-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-tsbg6" to be "success or failure"
+Jun 18 08:32:02.725: INFO: Pod "pod-8c08f4c0-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 3.4442ms
+Jun 18 08:32:04.728: INFO: Pod "pod-8c08f4c0-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006494097s
+Jun 18 08:32:06.731: INFO: Pod "pod-8c08f4c0-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009364153s
+STEP: Saw pod success
+Jun 18 08:32:06.731: INFO: Pod "pod-8c08f4c0-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:32:06.738: INFO: Trying to get logs from node node5 pod pod-8c08f4c0-91a3-11e9-bbf5-0e74dabf3615 container test-container: 
+STEP: delete the pod
+Jun 18 08:32:06.756: INFO: Waiting for pod pod-8c08f4c0-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:32:06.760: INFO: Pod pod-8c08f4c0-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:32:06.760: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-tsbg6" for this suite.
+Jun 18 08:32:14.775: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:32:14.833: INFO: namespace: e2e-tests-emptydir-tsbg6, resource: bindings, ignored listing per whitelist
+Jun 18 08:32:15.512: INFO: namespace e2e-tests-emptydir-tsbg6 deletion completed in 8.746688201s
+
+• [SLOW TEST:13.629 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:32:15.512: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-4d64h
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+Jun 18 08:32:15.726: INFO: Waiting up to 5m0s for pod "downwardapi-volume-93d094f0-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-4d64h" to be "success or failure"
+Jun 18 08:32:15.732: INFO: Pod "downwardapi-volume-93d094f0-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 6.030872ms
+Jun 18 08:32:17.745: INFO: Pod "downwardapi-volume-93d094f0-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018558795s
+Jun 18 08:32:19.747: INFO: Pod "downwardapi-volume-93d094f0-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020592561s
+STEP: Saw pod success
+Jun 18 08:32:19.747: INFO: Pod "downwardapi-volume-93d094f0-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:32:19.749: INFO: Trying to get logs from node node5 pod downwardapi-volume-93d094f0-91a3-11e9-bbf5-0e74dabf3615 container client-container: 
+STEP: delete the pod
+Jun 18 08:32:19.760: INFO: Waiting for pod downwardapi-volume-93d094f0-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:32:19.762: INFO: Pod downwardapi-volume-93d094f0-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:32:19.763: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-4d64h" for this suite.
+Jun 18 08:32:27.776: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:32:27.870: INFO: namespace: e2e-tests-downward-api-4d64h, resource: bindings, ignored listing per whitelist
+Jun 18 08:32:28.086: INFO: namespace e2e-tests-downward-api-4d64h deletion completed in 8.317820476s
+
+• [SLOW TEST:12.575 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:32:28.087: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-2mqrx
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+Jun 18 08:32:28.644: INFO: Waiting up to 5m0s for pod "downwardapi-volume-9b850e49-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-2mqrx" to be "success or failure"
+Jun 18 08:32:28.647: INFO: Pod "downwardapi-volume-9b850e49-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.915074ms
+Jun 18 08:32:30.650: INFO: Pod "downwardapi-volume-9b850e49-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.00558318s
+STEP: Saw pod success
+Jun 18 08:32:30.650: INFO: Pod "downwardapi-volume-9b850e49-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:32:30.654: INFO: Trying to get logs from node node5 pod downwardapi-volume-9b850e49-91a3-11e9-bbf5-0e74dabf3615 container client-container: 
+STEP: delete the pod
+Jun 18 08:32:30.674: INFO: Waiting for pod downwardapi-volume-9b850e49-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:32:30.676: INFO: Pod downwardapi-volume-9b850e49-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:32:30.676: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-2mqrx" for this suite.
+Jun 18 08:32:38.691: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:32:38.817: INFO: namespace: e2e-tests-downward-api-2mqrx, resource: bindings, ignored listing per whitelist
+Jun 18 08:32:39.526: INFO: namespace e2e-tests-downward-api-2mqrx deletion completed in 8.847128226s
+
+• [SLOW TEST:11.440 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with secret pod [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:32:39.527: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename subpath
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-subpath-h9x8q
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with secret pod [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod pod-subpath-test-secret-pvl7
+STEP: Creating a pod to test atomic-volume-subpath
+Jun 18 08:32:39.717: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-pvl7" in namespace "e2e-tests-subpath-h9x8q" to be "success or failure"
+Jun 18 08:32:39.738: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Pending", Reason="", readiness=false. Elapsed: 20.814424ms
+Jun 18 08:32:41.748: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.030369236s
+Jun 18 08:32:43.751: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Running", Reason="", readiness=false. Elapsed: 4.033375323s
+Jun 18 08:32:45.755: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Running", Reason="", readiness=false. Elapsed: 6.037205955s
+Jun 18 08:32:47.757: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Running", Reason="", readiness=false. Elapsed: 8.039954824s
+Jun 18 08:32:49.760: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Running", Reason="", readiness=false. Elapsed: 10.042419515s
+Jun 18 08:32:51.762: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Running", Reason="", readiness=false. Elapsed: 12.045040388s
+Jun 18 08:32:53.765: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Running", Reason="", readiness=false. Elapsed: 14.04790931s
+Jun 18 08:32:55.768: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Running", Reason="", readiness=false. Elapsed: 16.050194832s
+Jun 18 08:32:57.770: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Running", Reason="", readiness=false. Elapsed: 18.052848639s
+Jun 18 08:32:59.773: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Running", Reason="", readiness=false. Elapsed: 20.055800152s
+Jun 18 08:33:01.776: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Running", Reason="", readiness=false. Elapsed: 22.058209225s
+Jun 18 08:33:03.778: INFO: Pod "pod-subpath-test-secret-pvl7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.060633653s
+STEP: Saw pod success
+Jun 18 08:33:03.778: INFO: Pod "pod-subpath-test-secret-pvl7" satisfied condition "success or failure"
+Jun 18 08:33:03.780: INFO: Trying to get logs from node node5 pod pod-subpath-test-secret-pvl7 container test-container-subpath-secret-pvl7: 
+STEP: delete the pod
+Jun 18 08:33:03.795: INFO: Waiting for pod pod-subpath-test-secret-pvl7 to disappear
+Jun 18 08:33:03.796: INFO: Pod pod-subpath-test-secret-pvl7 no longer exists
+STEP: Deleting pod pod-subpath-test-secret-pvl7
+Jun 18 08:33:03.796: INFO: Deleting pod "pod-subpath-test-secret-pvl7" in namespace "e2e-tests-subpath-h9x8q"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:33:03.798: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-subpath-h9x8q" for this suite.
+Jun 18 08:33:12.514: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:33:12.620: INFO: namespace: e2e-tests-subpath-h9x8q, resource: bindings, ignored listing per whitelist
+Jun 18 08:33:12.842: INFO: namespace e2e-tests-subpath-h9x8q deletion completed in 9.038874397s
+
+• [SLOW TEST:33.315 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with secret pod [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  volume on tmpfs should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:33:12.842: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-n9xbt
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] volume on tmpfs should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir volume type on tmpfs
+Jun 18 08:33:13.667: INFO: Waiting up to 5m0s for pod "pod-b65b0bf6-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-n9xbt" to be "success or failure"
+Jun 18 08:33:13.670: INFO: Pod "pod-b65b0bf6-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.289338ms
+Jun 18 08:33:15.674: INFO: Pod "pod-b65b0bf6-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006626654s
+STEP: Saw pod success
+Jun 18 08:33:15.674: INFO: Pod "pod-b65b0bf6-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:33:15.676: INFO: Trying to get logs from node node5 pod pod-b65b0bf6-91a3-11e9-bbf5-0e74dabf3615 container test-container: 
+STEP: delete the pod
+Jun 18 08:33:15.693: INFO: Waiting for pod pod-b65b0bf6-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:33:15.694: INFO: Pod pod-b65b0bf6-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:33:15.694: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-n9xbt" for this suite.
+Jun 18 08:33:23.708: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:33:24.544: INFO: namespace: e2e-tests-emptydir-n9xbt, resource: bindings, ignored listing per whitelist
+Jun 18 08:33:24.610: INFO: namespace e2e-tests-emptydir-n9xbt deletion completed in 8.911249005s
+
+• [SLOW TEST:11.768 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  volume on tmpfs should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Secrets 
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:33:24.610: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-ncfms
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating secret e2e-tests-secrets-ncfms/secret-test-bd6b3055-91a3-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume secrets
+Jun 18 08:33:25.563: INFO: Waiting up to 5m0s for pod "pod-configmaps-bd6fcdbf-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-secrets-ncfms" to be "success or failure"
+Jun 18 08:33:25.565: INFO: Pod "pod-configmaps-bd6fcdbf-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.17539ms
+Jun 18 08:33:27.568: INFO: Pod "pod-configmaps-bd6fcdbf-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.004474077s
+STEP: Saw pod success
+Jun 18 08:33:27.568: INFO: Pod "pod-configmaps-bd6fcdbf-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:33:27.569: INFO: Trying to get logs from node node5 pod pod-configmaps-bd6fcdbf-91a3-11e9-bbf5-0e74dabf3615 container env-test: 
+STEP: delete the pod
+Jun 18 08:33:27.588: INFO: Waiting for pod pod-configmaps-bd6fcdbf-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:33:27.590: INFO: Pod pod-configmaps-bd6fcdbf-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:33:27.590: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-ncfms" for this suite.
+Jun 18 08:33:35.613: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:33:36.560: INFO: namespace: e2e-tests-secrets-ncfms, resource: bindings, ignored listing per whitelist
+Jun 18 08:33:36.735: INFO: namespace e2e-tests-secrets-ncfms deletion completed in 9.139956271s
+
+• [SLOW TEST:12.125 seconds]
+[sig-api-machinery] Secrets
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:32
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:33:36.736: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-pfqkn
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0777 on node default medium
+Jun 18 08:33:37.676: INFO: Waiting up to 5m0s for pod "pod-c4aa8620-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-pfqkn" to be "success or failure"
+Jun 18 08:33:37.678: INFO: Pod "pod-c4aa8620-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.622565ms
+Jun 18 08:33:39.681: INFO: Pod "pod-c4aa8620-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.005491082s
+Jun 18 08:33:41.684: INFO: Pod "pod-c4aa8620-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.008267828s
+STEP: Saw pod success
+Jun 18 08:33:41.684: INFO: Pod "pod-c4aa8620-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:33:41.686: INFO: Trying to get logs from node node5 pod pod-c4aa8620-91a3-11e9-bbf5-0e74dabf3615 container test-container: 
+STEP: delete the pod
+Jun 18 08:33:41.700: INFO: Waiting for pod pod-c4aa8620-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:33:41.704: INFO: Pod pod-c4aa8620-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:33:41.704: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-pfqkn" for this suite.
+Jun 18 08:33:49.719: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:33:49.833: INFO: namespace: e2e-tests-emptydir-pfqkn, resource: bindings, ignored listing per whitelist
+Jun 18 08:33:50.516: INFO: namespace e2e-tests-emptydir-pfqkn deletion completed in 8.807562387s
+
+• [SLOW TEST:13.780 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (non-root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute poststart exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:33:50.516: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-lifecycle-hook-kp2n8
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute poststart exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the pod with lifecycle hook
+STEP: check poststart hook
+STEP: delete the pod with lifecycle hook
+Jun 18 08:33:58.769: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 18 08:33:58.772: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 18 08:34:00.772: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 18 08:34:00.776: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 18 08:34:02.772: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 18 08:34:02.775: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 18 08:34:04.772: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 18 08:34:04.774: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 18 08:34:06.772: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 18 08:34:06.778: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 18 08:34:08.772: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 18 08:34:08.775: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 18 08:34:10.772: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 18 08:34:10.774: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 18 08:34:12.772: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 18 08:34:13.564: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 18 08:34:14.772: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 18 08:34:14.774: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 18 08:34:16.772: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 18 08:34:16.774: INFO: Pod pod-with-poststart-exec-hook no longer exists
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:34:16.774: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-kp2n8" for this suite.
+Jun 18 08:34:40.786: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:34:40.836: INFO: namespace: e2e-tests-container-lifecycle-hook-kp2n8, resource: bindings, ignored listing per whitelist
+Jun 18 08:34:41.520: INFO: namespace e2e-tests-container-lifecycle-hook-kp2n8 deletion completed in 24.742247217s
+
+• [SLOW TEST:51.004 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40
+    should execute poststart exec hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:34:41.520: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-hhqnc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name projected-configmap-test-volume-map-ead599ca-91a3-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume configMaps
+Jun 18 08:34:41.714: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-ead5f989-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-hhqnc" to be "success or failure"
+Jun 18 08:34:41.716: INFO: Pod "pod-projected-configmaps-ead5f989-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 1.664657ms
+Jun 18 08:34:43.719: INFO: Pod "pod-projected-configmaps-ead5f989-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.00470215s
+STEP: Saw pod success
+Jun 18 08:34:43.719: INFO: Pod "pod-projected-configmaps-ead5f989-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:34:43.721: INFO: Trying to get logs from node node5 pod pod-projected-configmaps-ead5f989-91a3-11e9-bbf5-0e74dabf3615 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 18 08:34:43.734: INFO: Waiting for pod pod-projected-configmaps-ead5f989-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:34:43.735: INFO: Pod pod-projected-configmaps-ead5f989-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:34:43.736: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-hhqnc" for this suite.
+Jun 18 08:34:51.749: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:34:52.587: INFO: namespace: e2e-tests-projected-hhqnc, resource: bindings, ignored listing per whitelist
+Jun 18 08:34:52.600: INFO: namespace e2e-tests-projected-hhqnc deletion completed in 8.8606257s
+
+• [SLOW TEST:11.080 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:34:52.600: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-lq87r
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-f1e30ab1-91a3-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume secrets
+Jun 18 08:34:53.569: INFO: Waiting up to 5m0s for pod "pod-secrets-f1e4eeb0-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-secrets-lq87r" to be "success or failure"
+Jun 18 08:34:53.586: INFO: Pod "pod-secrets-f1e4eeb0-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 17.043651ms
+Jun 18 08:34:55.588: INFO: Pod "pod-secrets-f1e4eeb0-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019607705s
+Jun 18 08:34:57.591: INFO: Pod "pod-secrets-f1e4eeb0-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022434193s
+STEP: Saw pod success
+Jun 18 08:34:57.591: INFO: Pod "pod-secrets-f1e4eeb0-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:34:57.593: INFO: Trying to get logs from node node5 pod pod-secrets-f1e4eeb0-91a3-11e9-bbf5-0e74dabf3615 container secret-volume-test: 
+STEP: delete the pod
+Jun 18 08:34:57.610: INFO: Waiting for pod pod-secrets-f1e4eeb0-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:34:57.612: INFO: Pod pod-secrets-f1e4eeb0-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:34:57.612: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-lq87r" for this suite.
+Jun 18 08:35:05.666: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:35:07.604: INFO: namespace: e2e-tests-secrets-lq87r, resource: bindings, ignored listing per whitelist
+Jun 18 08:35:07.715: INFO: namespace e2e-tests-secrets-lq87r deletion completed in 10.100126985s
+
+• [SLOW TEST:15.115 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:35:07.715: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-g2lqp
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0666 on node default medium
+Jun 18 08:35:08.708: INFO: Waiting up to 5m0s for pod "pod-faec866e-91a3-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-g2lqp" to be "success or failure"
+Jun 18 08:35:08.723: INFO: Pod "pod-faec866e-91a3-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 15.465072ms
+Jun 18 08:35:10.726: INFO: Pod "pod-faec866e-91a3-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.018212161s
+STEP: Saw pod success
+Jun 18 08:35:10.726: INFO: Pod "pod-faec866e-91a3-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:35:10.728: INFO: Trying to get logs from node node5 pod pod-faec866e-91a3-11e9-bbf5-0e74dabf3615 container test-container: 
+STEP: delete the pod
+Jun 18 08:35:10.743: INFO: Waiting for pod pod-faec866e-91a3-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:35:10.744: INFO: Pod pod-faec866e-91a3-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:35:10.744: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-g2lqp" for this suite.
+Jun 18 08:35:16.756: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:35:16.775: INFO: namespace: e2e-tests-emptydir-g2lqp, resource: bindings, ignored listing per whitelist
+Jun 18 08:35:17.612: INFO: namespace e2e-tests-emptydir-g2lqp deletion completed in 6.864270137s
+
+• [SLOW TEST:9.897 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (non-root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[k8s.io] [sig-node] Events 
+  should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] [sig-node] Events
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:35:17.612: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename events
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-events-wp9qw
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: retrieving the pod
+Jun 18 08:35:22.534: INFO: &Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:send-events-00c52bf8-91a4-11e9-bbf5-0e74dabf3615,GenerateName:,Namespace:e2e-tests-events-wp9qw,SelfLink:/api/v1/namespaces/e2e-tests-events-wp9qw/pods/send-events-00c52bf8-91a4-11e9-bbf5-0e74dabf3615,UID:00c54851-91a4-11e9-8cfd-00163e000a67,ResourceVersion:13554284,Generation:0,CreationTimestamp:2019-06-18 08:35:18 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: foo,time: 509676377,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-d5xgp {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-d5xgp,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{p reg.kpaas.io/kubernetes-e2e-test-images/serve-hostname:1.1 [] []  [{ 0 80 TCP }] [] [] {map[] map[]} [{default-token-d5xgp true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc00295f6e0} {node.kubernetes.io/unreachable Exists  NoExecute 
0xc00295f700}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:35:18 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:35:21 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:35:21 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:35:18 +0000 UTC  }],Message:,Reason:,HostIP:192.168.2.155,PodIP:171.171.33.167,StartTime:2019-06-18 08:35:18 +0000 UTC,ContainerStatuses:[{p {nil ContainerStateRunning{StartedAt:2019-06-18 08:35:20 +0000 UTC,} nil} {nil nil nil} true 0 reg.kpaas.io/kubernetes-e2e-test-images/serve-hostname:1.1 docker-pullable://reg.kpaas.io/kubernetes-e2e-test-images/serve-hostname@sha256:53c28beabd3509fb5b1d1185b2962e8204384cef7562982d8b216b71292aabf9 docker://b0a321a8151f2e185f16a838ee2c7425a773c74c033dae78464a9a3a3e32285e}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+
+STEP: checking for scheduler event about the pod
+Jun 18 08:35:24.538: INFO: Saw scheduler event for our pod.
+STEP: checking for kubelet event about the pod
+Jun 18 08:35:26.541: INFO: Saw kubelet event for our pod.
+STEP: deleting the pod
+[AfterEach] [k8s.io] [sig-node] Events
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:35:26.546: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-events-wp9qw" for this suite.
+Jun 18 08:36:14.561: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:36:14.683: INFO: namespace: e2e-tests-events-wp9qw, resource: bindings, ignored listing per whitelist
+Jun 18 08:36:15.535: INFO: namespace e2e-tests-events-wp9qw deletion completed in 48.985081527s
+
+• [SLOW TEST:57.923 seconds]
+[k8s.io] [sig-node] Events
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:36:15.535: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-hs9lv
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating the pod
+Jun 18 08:36:23.520: INFO: Successfully updated pod "labelsupdate23f0d9cf-91a4-11e9-bbf5-0e74dabf3615"
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:36:25.543: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-hs9lv" for this suite.
+Jun 18 08:36:53.561: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:36:53.579: INFO: namespace: e2e-tests-projected-hs9lv, resource: bindings, ignored listing per whitelist
+Jun 18 08:36:54.555: INFO: namespace e2e-tests-projected-hs9lv deletion completed in 29.008614546s
+
+• [SLOW TEST:39.020 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-node] Downward API 
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:36:54.555: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-tnphw
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward api env vars
+Jun 18 08:36:55.557: INFO: Waiting up to 5m0s for pod "downward-api-3a9adc09-91a4-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-tnphw" to be "success or failure"
+Jun 18 08:36:55.566: INFO: Pod "downward-api-3a9adc09-91a4-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 8.8239ms
+Jun 18 08:36:57.569: INFO: Pod "downward-api-3a9adc09-91a4-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011611703s
+Jun 18 08:36:59.572: INFO: Pod "downward-api-3a9adc09-91a4-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.014565076s
+STEP: Saw pod success
+Jun 18 08:36:59.572: INFO: Pod "downward-api-3a9adc09-91a4-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:36:59.574: INFO: Trying to get logs from node node5 pod downward-api-3a9adc09-91a4-11e9-bbf5-0e74dabf3615 container dapi-container: 
+STEP: delete the pod
+Jun 18 08:36:59.594: INFO: Waiting for pod downward-api-3a9adc09-91a4-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:36:59.596: INFO: Pod downward-api-3a9adc09-91a4-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:36:59.596: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-tnphw" for this suite.
+Jun 18 08:37:07.611: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:37:07.756: INFO: namespace: e2e-tests-downward-api-tnphw, resource: bindings, ignored listing per whitelist
+Jun 18 08:37:08.530: INFO: namespace e2e-tests-downward-api-tnphw deletion completed in 8.9300332s
+
+• [SLOW TEST:13.975 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run job 
+  should create a job from an image when restart is OnFailure  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:37:08.530: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-knqtb
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl run job
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1454
+[It] should create a job from an image when restart is OnFailure  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun 18 08:37:09.523: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 run e2e-test-nginx-job --restart=OnFailure --generator=job/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=e2e-tests-kubectl-knqtb'
+Jun 18 08:37:10.568: INFO: stderr: "kubectl run --generator=job/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+Jun 18 08:37:10.568: INFO: stdout: "job.batch/e2e-test-nginx-job created\n"
+STEP: verifying the job e2e-test-nginx-job was created
+[AfterEach] [k8s.io] Kubectl run job
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1459
+Jun 18 08:37:10.573: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete jobs e2e-test-nginx-job --namespace=e2e-tests-kubectl-knqtb'
+Jun 18 08:37:10.659: INFO: stderr: ""
+Jun 18 08:37:10.659: INFO: stdout: "job.batch \"e2e-test-nginx-job\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:37:10.660: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-knqtb" for this suite.
+Jun 18 08:37:34.684: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:37:35.551: INFO: namespace: e2e-tests-kubectl-knqtb, resource: bindings, ignored listing per whitelist
+Jun 18 08:37:35.586: INFO: namespace e2e-tests-kubectl-knqtb deletion completed in 24.909981679s
+
+• [SLOW TEST:27.056 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl run job
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should create a job from an image when restart is OnFailure  [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0777,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:37:35.586: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-8twb8
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0777,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0777 on tmpfs
+Jun 18 08:37:35.765: INFO: Waiting up to 5m0s for pod "pod-52943d2d-91a4-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-8twb8" to be "success or failure"
+Jun 18 08:37:35.767: INFO: Pod "pod-52943d2d-91a4-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 1.79564ms
+Jun 18 08:37:37.770: INFO: Pod "pod-52943d2d-91a4-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.004453371s
+Jun 18 08:37:39.775: INFO: Pod "pod-52943d2d-91a4-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009391659s
+STEP: Saw pod success
+Jun 18 08:37:39.775: INFO: Pod "pod-52943d2d-91a4-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:37:39.777: INFO: Trying to get logs from node node5 pod pod-52943d2d-91a4-11e9-bbf5-0e74dabf3615 container test-container: 
+STEP: delete the pod
+Jun 18 08:37:40.513: INFO: Waiting for pod pod-52943d2d-91a4-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:37:40.722: INFO: Pod pod-52943d2d-91a4-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:37:40.722: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-8twb8" for this suite.
+Jun 18 08:37:49.517: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:37:49.589: INFO: namespace: e2e-tests-emptydir-8twb8, resource: bindings, ignored listing per whitelist
+Jun 18 08:37:50.663: INFO: namespace e2e-tests-emptydir-8twb8 deletion completed in 9.936982227s
+
+• [SLOW TEST:15.076 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (non-root,0777,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:37:50.663: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename init-container
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-init-container-tn7j2
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+Jun 18 08:37:51.627: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:37:54.517: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-init-container-tn7j2" for this suite.
+Jun 18 08:38:02.540: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:38:02.615: INFO: namespace: e2e-tests-init-container-tn7j2, resource: bindings, ignored listing per whitelist
+Jun 18 08:38:02.863: INFO: namespace e2e-tests-init-container-tn7j2 deletion completed in 8.340060476s
+
+• [SLOW TEST:12.200 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicaSet 
+  should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:38:02.863: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename replicaset
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-replicaset-nmldn
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Given a Pod with a 'name' label pod-adoption-release is created
+STEP: When a replicaset with a matching selector is created
+STEP: Then the orphan pod is adopted
+STEP: When the matched label of one of its pods change
+Jun 18 08:39:57.543: INFO: Pod name pod-adoption-release: Found 1 pods out of 1
+STEP: Then the pod is released
+[AfterEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:39:57.557: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-replicaset-nmldn" for this suite.
+Jun 18 08:40:23.586: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:40:23.708: INFO: namespace: e2e-tests-replicaset-nmldn, resource: bindings, ignored listing per whitelist
+Jun 18 08:40:23.902: INFO: namespace e2e-tests-replicaset-nmldn deletion completed in 26.337181113s
+
+• [SLOW TEST:141.038 seconds]
+[sig-apps] ReplicaSet
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:40:23.902: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-6k2p8
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0644 on node default medium
+Jun 18 08:40:24.651: INFO: Waiting up to 5m0s for pod "pod-b73e0c0d-91a4-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-6k2p8" to be "success or failure"
+Jun 18 08:40:24.653: INFO: Pod "pod-b73e0c0d-91a4-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 1.841225ms
+Jun 18 08:40:26.657: INFO: Pod "pod-b73e0c0d-91a4-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.00610906s
+STEP: Saw pod success
+Jun 18 08:40:26.657: INFO: Pod "pod-b73e0c0d-91a4-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:40:26.659: INFO: Trying to get logs from node node5 pod pod-b73e0c0d-91a4-11e9-bbf5-0e74dabf3615 container test-container: 
+STEP: delete the pod
+Jun 18 08:40:26.672: INFO: Waiting for pod pod-b73e0c0d-91a4-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:40:26.675: INFO: Pod pod-b73e0c0d-91a4-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:40:26.675: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-6k2p8" for this suite.
+Jun 18 08:40:36.693: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:40:37.582: INFO: namespace: e2e-tests-emptydir-6k2p8, resource: bindings, ignored listing per whitelist
+Jun 18 08:40:37.614: INFO: namespace e2e-tests-emptydir-6k2p8 deletion completed in 10.934702114s
+
+• [SLOW TEST:13.711 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (non-root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-auth] ServiceAccounts 
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:40:37.614: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename svcaccounts
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-svcaccounts-nnb2d
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: getting the auto-created API token
+STEP: Creating a pod to test consume service account token
+Jun 18 08:40:38.520: INFO: Waiting up to 5m0s for pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-mj2l6" in namespace "e2e-tests-svcaccounts-nnb2d" to be "success or failure"
+Jun 18 08:40:38.523: INFO: Pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-mj2l6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.745907ms
+Jun 18 08:40:40.528: INFO: Pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-mj2l6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008005592s
+STEP: Saw pod success
+Jun 18 08:40:40.528: INFO: Pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-mj2l6" satisfied condition "success or failure"
+Jun 18 08:40:40.531: INFO: Trying to get logs from node node5 pod pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-mj2l6 container token-test: 
+STEP: delete the pod
+Jun 18 08:40:40.555: INFO: Waiting for pod pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-mj2l6 to disappear
+Jun 18 08:40:40.557: INFO: Pod pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-mj2l6 no longer exists
+STEP: Creating a pod to test consume service account root CA
+Jun 18 08:40:40.568: INFO: Waiting up to 5m0s for pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-l4bkl" in namespace "e2e-tests-svcaccounts-nnb2d" to be "success or failure"
+Jun 18 08:40:40.570: INFO: Pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-l4bkl": Phase="Pending", Reason="", readiness=false. Elapsed: 2.109955ms
+Jun 18 08:40:42.578: INFO: Pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-l4bkl": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009936229s
+STEP: Saw pod success
+Jun 18 08:40:42.578: INFO: Pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-l4bkl" satisfied condition "success or failure"
+Jun 18 08:40:42.580: INFO: Trying to get logs from node node5 pod pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-l4bkl container root-ca-test: 
+STEP: delete the pod
+Jun 18 08:40:42.598: INFO: Waiting for pod pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-l4bkl to disappear
+Jun 18 08:40:42.601: INFO: Pod pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-l4bkl no longer exists
+STEP: Creating a pod to test consume service account namespace
+Jun 18 08:40:42.605: INFO: Waiting up to 5m0s for pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-xs5wc" in namespace "e2e-tests-svcaccounts-nnb2d" to be "success or failure"
+Jun 18 08:40:42.607: INFO: Pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-xs5wc": Phase="Pending", Reason="", readiness=false. Elapsed: 1.731407ms
+Jun 18 08:40:44.610: INFO: Pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-xs5wc": Phase="Pending", Reason="", readiness=false. Elapsed: 2.004985167s
+Jun 18 08:40:46.613: INFO: Pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-xs5wc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.007694122s
+STEP: Saw pod success
+Jun 18 08:40:46.613: INFO: Pod "pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-xs5wc" satisfied condition "success or failure"
+Jun 18 08:40:46.616: INFO: Trying to get logs from node node5 pod pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-xs5wc container namespace-test: 
+STEP: delete the pod
+Jun 18 08:40:46.645: INFO: Waiting for pod pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-xs5wc to disappear
+Jun 18 08:40:46.648: INFO: Pod pod-service-account-bf820447-91a4-11e9-bbf5-0e74dabf3615-xs5wc no longer exists
+[AfterEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:40:46.648: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-svcaccounts-nnb2d" for this suite.
+Jun 18 08:40:56.665: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:40:57.556: INFO: namespace: e2e-tests-svcaccounts-nnb2d, resource: bindings, ignored listing per whitelist
+Jun 18 08:40:57.657: INFO: namespace e2e-tests-svcaccounts-nnb2d deletion completed in 11.004778576s
+
+• [SLOW TEST:20.044 seconds]
+[sig-auth] ServiceAccounts
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:22
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl rolling-update 
+  should support rolling-update to same image  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:40:57.658: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-f6tj5
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1358
+[It] should support rolling-update to same image  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun 18 08:40:58.666: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=e2e-tests-kubectl-f6tj5'
+Jun 18 08:40:59.565: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+Jun 18 08:40:59.565: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n"
+STEP: verifying the rc e2e-test-nginx-rc was created
+Jun 18 08:40:59.620: INFO: Waiting for rc e2e-test-nginx-rc to stabilize, generation 1 observed generation 0 spec.replicas 1 status.replicas 0
+Jun 18 08:41:00.549: INFO: Waiting for rc e2e-test-nginx-rc to stabilize, generation 1 observed generation 1 spec.replicas 1 status.replicas 0
+STEP: rolling-update to same image controller
+Jun 18 08:41:00.635: INFO: scanned /root for discovery docs: 
+Jun 18 08:41:00.635: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 rolling-update e2e-test-nginx-rc --update-period=1s --image=docker.io/library/nginx:1.14-alpine --image-pull-policy=IfNotPresent --namespace=e2e-tests-kubectl-f6tj5'
+Jun 18 08:41:19.671: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n"
+Jun 18 08:41:19.671: INFO: stdout: "Created e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8\nScaling up e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8 from 0 to 1, scaling down e2e-test-nginx-rc from 1 to 0 (keep 1 pods available, don't exceed 2 pods)\nScaling e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8 up to 1\nScaling e2e-test-nginx-rc down to 0\nUpdate succeeded. Deleting old controller: e2e-test-nginx-rc\nRenaming e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8 to e2e-test-nginx-rc\nreplicationcontroller/e2e-test-nginx-rc rolling updated\n"
+Jun 18 08:41:19.671: INFO: stdout: "Created e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8\nScaling up e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8 from 0 to 1, scaling down e2e-test-nginx-rc from 1 to 0 (keep 1 pods available, don't exceed 2 pods)\nScaling e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8 up to 1\nScaling e2e-test-nginx-rc down to 0\nUpdate succeeded. Deleting old controller: e2e-test-nginx-rc\nRenaming e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8 to e2e-test-nginx-rc\nreplicationcontroller/e2e-test-nginx-rc rolling updated\n"
+STEP: waiting for all containers in run=e2e-test-nginx-rc pods to come up.
+Jun 18 08:41:19.671: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l run=e2e-test-nginx-rc --namespace=e2e-tests-kubectl-f6tj5'
+Jun 18 08:41:20.586: INFO: stderr: ""
+Jun 18 08:41:20.586: INFO: stdout: "e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8-4qs5l "
+Jun 18 08:41:20.586: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8-4qs5l -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "e2e-test-nginx-rc") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-f6tj5'
+Jun 18 08:41:20.671: INFO: stderr: ""
+Jun 18 08:41:20.671: INFO: stdout: "true"
+Jun 18 08:41:20.671: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8-4qs5l -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "e2e-test-nginx-rc"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-f6tj5'
+Jun 18 08:41:21.528: INFO: stderr: ""
+Jun 18 08:41:21.528: INFO: stdout: "docker.io/library/nginx:1.14-alpine"
+Jun 18 08:41:21.528: INFO: e2e-test-nginx-rc-56ee10c919b562077253f292a54e26a8-4qs5l is verified up and running
+[AfterEach] [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1364
+Jun 18 08:41:21.528: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete rc e2e-test-nginx-rc --namespace=e2e-tests-kubectl-f6tj5'
+Jun 18 08:41:21.614: INFO: stderr: ""
+Jun 18 08:41:21.614: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:41:21.614: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-f6tj5" for this suite.
+Jun 18 08:41:31.633: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:41:32.630: INFO: namespace: e2e-tests-kubectl-f6tj5, resource: bindings, ignored listing per whitelist
+Jun 18 08:41:33.523: INFO: namespace e2e-tests-kubectl-f6tj5 deletion completed in 11.905206424s
+
+• [SLOW TEST:35.866 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should support rolling-update to same image  [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:41:33.523: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-7fq5l
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name projected-configmap-test-volume-e0e2bf76-91a4-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume configMaps
+Jun 18 08:41:34.539: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-e0e5bc10-91a4-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-7fq5l" to be "success or failure"
+Jun 18 08:41:34.585: INFO: Pod "pod-projected-configmaps-e0e5bc10-91a4-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 46.834511ms
+Jun 18 08:41:36.595: INFO: Pod "pod-projected-configmaps-e0e5bc10-91a4-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.056314424s
+Jun 18 08:41:38.598: INFO: Pod "pod-projected-configmaps-e0e5bc10-91a4-11e9-bbf5-0e74dabf3615": Phase="Running", Reason="", readiness=true. Elapsed: 4.059280221s
+Jun 18 08:41:40.601: INFO: Pod "pod-projected-configmaps-e0e5bc10-91a4-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.062282761s
+STEP: Saw pod success
+Jun 18 08:41:40.601: INFO: Pod "pod-projected-configmaps-e0e5bc10-91a4-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:41:40.603: INFO: Trying to get logs from node node5 pod pod-projected-configmaps-e0e5bc10-91a4-11e9-bbf5-0e74dabf3615 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 18 08:41:40.623: INFO: Waiting for pod pod-projected-configmaps-e0e5bc10-91a4-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:41:40.628: INFO: Pod pod-projected-configmaps-e0e5bc10-91a4-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:41:40.628: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-7fq5l" for this suite.
+Jun 18 08:41:50.643: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:41:51.620: INFO: namespace: e2e-tests-projected-7fq5l, resource: bindings, ignored listing per whitelist
+Jun 18 08:41:51.651: INFO: namespace e2e-tests-projected-7fq5l deletion completed in 11.019177897s
+
+• [SLOW TEST:18.128 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Proxy version v1 
+  should proxy logs on node using proxy subresource  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] version v1
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:41:51.651: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename proxy
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-proxy-jlvb2
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should proxy logs on node using proxy subresource  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+Jun 18 08:41:52.684: INFO: (0) /api/v1/nodes/node1/proxy/logs/: 
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log
+anaconda/
+audit/
+boot.log>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-b8rs8
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-projected-all-test-volume-f22b9edf-91a4-11e9-bbf5-0e74dabf3615
+STEP: Creating secret with name secret-projected-all-test-volume-f22b9eca-91a4-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test Check all projections for projected volume plugin
+Jun 18 08:42:03.536: INFO: Waiting up to 5m0s for pod "projected-volume-f22b9e8d-91a4-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-b8rs8" to be "success or failure"
+Jun 18 08:42:03.549: INFO: Pod "projected-volume-f22b9e8d-91a4-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 12.996534ms
+Jun 18 08:42:05.567: INFO: Pod "projected-volume-f22b9e8d-91a4-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.030770449s
+STEP: Saw pod success
+Jun 18 08:42:05.567: INFO: Pod "projected-volume-f22b9e8d-91a4-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:42:05.570: INFO: Trying to get logs from node node5 pod projected-volume-f22b9e8d-91a4-11e9-bbf5-0e74dabf3615 container projected-all-volume-test: 
+STEP: delete the pod
+Jun 18 08:42:05.598: INFO: Waiting for pod projected-volume-f22b9e8d-91a4-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:42:05.600: INFO: Pod projected-volume-f22b9e8d-91a4-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected combined
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:42:05.600: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-b8rs8" for this suite.
+Jun 18 08:42:13.621: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:42:13.670: INFO: namespace: e2e-tests-projected-b8rs8, resource: bindings, ignored listing per whitelist
+Jun 18 08:42:14.518: INFO: namespace e2e-tests-projected-b8rs8 deletion completed in 8.907149492s
+
+• [SLOW TEST:11.958 seconds]
+[sig-storage] Projected combined
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_combined.go:31
+  should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl replace 
+  should update a single-container pod's image  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:42:14.518: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-kgppm
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl replace
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1563
+[It] should update a single-container pod's image  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun 18 08:42:15.577: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 run e2e-test-nginx-pod --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --labels=run=e2e-test-nginx-pod --namespace=e2e-tests-kubectl-kgppm'
+Jun 18 08:42:15.666: INFO: stderr: ""
+Jun 18 08:42:15.666: INFO: stdout: "pod/e2e-test-nginx-pod created\n"
+STEP: verifying the pod e2e-test-nginx-pod is running
+STEP: verifying the pod e2e-test-nginx-pod was created
+Jun 18 08:42:20.716: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pod e2e-test-nginx-pod --namespace=e2e-tests-kubectl-kgppm -o json'
+Jun 18 08:42:20.794: INFO: stderr: ""
+Jun 18 08:42:20.794: INFO: stdout: "{\n    \"apiVersion\": \"v1\",\n    \"kind\": \"Pod\",\n    \"metadata\": {\n        \"creationTimestamp\": \"2019-06-18T08:42:15Z\",\n        \"labels\": {\n            \"run\": \"e2e-test-nginx-pod\"\n        },\n        \"name\": \"e2e-test-nginx-pod\",\n        \"namespace\": \"e2e-tests-kubectl-kgppm\",\n        \"resourceVersion\": \"13556780\",\n        \"selfLink\": \"/api/v1/namespaces/e2e-tests-kubectl-kgppm/pods/e2e-test-nginx-pod\",\n        \"uid\": \"f96854e4-91a4-11e9-8cfd-00163e000a67\"\n    },\n    \"spec\": {\n        \"containers\": [\n            {\n                \"image\": \"docker.io/library/nginx:1.14-alpine\",\n                \"imagePullPolicy\": \"IfNotPresent\",\n                \"name\": \"e2e-test-nginx-pod\",\n                \"resources\": {},\n                \"terminationMessagePath\": \"/dev/termination-log\",\n                \"terminationMessagePolicy\": \"File\",\n                \"volumeMounts\": [\n                    {\n                        \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n                        \"name\": \"default-token-sznwj\",\n                        \"readOnly\": true\n                    }\n                ]\n            }\n        ],\n        \"dnsPolicy\": \"ClusterFirst\",\n        \"enableServiceLinks\": true,\n        \"nodeName\": \"node5\",\n        \"priority\": 0,\n        \"restartPolicy\": \"Always\",\n        \"schedulerName\": \"default-scheduler\",\n        \"securityContext\": {},\n        \"serviceAccount\": \"default\",\n        \"serviceAccountName\": \"default\",\n        \"terminationGracePeriodSeconds\": 30,\n        \"tolerations\": [\n            {\n                \"effect\": \"NoExecute\",\n                \"key\": \"node.kubernetes.io/not-ready\",\n                \"operator\": \"Exists\",\n                \"tolerationSeconds\": 300\n            },\n            {\n                \"effect\": \"NoExecute\",\n            
    \"key\": \"node.kubernetes.io/unreachable\",\n                \"operator\": \"Exists\",\n                \"tolerationSeconds\": 300\n            }\n        ],\n        \"volumes\": [\n            {\n                \"name\": \"default-token-sznwj\",\n                \"secret\": {\n                    \"defaultMode\": 420,\n                    \"secretName\": \"default-token-sznwj\"\n                }\n            }\n        ]\n    },\n    \"status\": {\n        \"conditions\": [\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-18T08:42:15Z\",\n                \"status\": \"True\",\n                \"type\": \"Initialized\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-18T08:42:17Z\",\n                \"status\": \"True\",\n                \"type\": \"Ready\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-18T08:42:17Z\",\n                \"status\": \"True\",\n                \"type\": \"ContainersReady\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-18T08:42:15Z\",\n                \"status\": \"True\",\n                \"type\": \"PodScheduled\"\n            }\n        ],\n        \"containerStatuses\": [\n            {\n                \"containerID\": \"docker://a6212312e59af61eebc060ec7bd572117246813b531641043d38d55743fabb31\",\n                \"image\": \"nginx:1.14-alpine\",\n                \"imageID\": \"docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7\",\n                \"lastState\": {},\n                \"name\": \"e2e-test-nginx-pod\",\n                \"ready\": true,\n                \"restartCount\": 0,\n                \"state\": {\n                    \"running\": {\n                        \"startedAt\": 
\"2019-06-18T08:42:16Z\"\n                    }\n                }\n            }\n        ],\n        \"hostIP\": \"192.168.2.155\",\n        \"phase\": \"Running\",\n        \"podIP\": \"171.171.33.180\",\n        \"qosClass\": \"BestEffort\",\n        \"startTime\": \"2019-06-18T08:42:15Z\"\n    }\n}\n"
+STEP: replace the image in the pod
+Jun 18 08:42:20.794: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 replace -f - --namespace=e2e-tests-kubectl-kgppm'
+Jun 18 08:42:21.639: INFO: stderr: ""
+Jun 18 08:42:21.639: INFO: stdout: "pod/e2e-test-nginx-pod replaced\n"
+STEP: verifying the pod e2e-test-nginx-pod has the right image docker.io/library/busybox:1.29
+[AfterEach] [k8s.io] Kubectl replace
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1568
+Jun 18 08:42:21.643: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete pods e2e-test-nginx-pod --namespace=e2e-tests-kubectl-kgppm'
+Jun 18 08:42:28.399: INFO: stderr: ""
+Jun 18 08:42:28.399: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:42:28.399: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-kgppm" for this suite.
+Jun 18 08:42:38.513: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:42:38.577: INFO: namespace: e2e-tests-kubectl-kgppm, resource: bindings, ignored listing per whitelist
+Jun 18 08:42:38.827: INFO: namespace e2e-tests-kubectl-kgppm deletion completed in 10.424501226s
+
+• [SLOW TEST:24.309 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl replace
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should update a single-container pod's image  [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-apps] Deployment 
+  RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:42:38.827: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename deployment
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-deployment-f77wh
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+Jun 18 08:42:39.530: INFO: Creating deployment "test-recreate-deployment"
+Jun 18 08:42:39.589: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1
+Jun 18 08:42:39.601: INFO: new replicaset for deployment "test-recreate-deployment" is yet to be created
+Jun 18 08:42:41.606: INFO: Waiting deployment "test-recreate-deployment" to complete
+Jun 18 08:42:41.609: INFO: Triggering a new rollout for deployment "test-recreate-deployment"
+Jun 18 08:42:41.615: INFO: Updating deployment test-recreate-deployment
+Jun 18 08:42:41.616: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with olds pods
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+Jun 18 08:42:41.685: INFO: Deployment "test-recreate-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment,GenerateName:,Namespace:e2e-tests-deployment-f77wh,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-f77wh/deployments/test-recreate-deployment,UID:07a392e0-91a5-11e9-8cfd-00163e000a67,ResourceVersion:13556960,Generation:2,CreationTimestamp:2019-06-18 08:42:39 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[{Available False 2019-06-18 08:42:41 +0000 UTC 2019-06-18 08:42:41 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} {Progressing True 2019-06-18 08:42:41 +0000 UTC 2019-06-18 08:42:39 +0000 UTC ReplicaSetUpdated ReplicaSet "test-recreate-deployment-697fbf54bf" is progressing.}],ReadyReplicas:0,CollisionCount:nil,},}
+
+Jun 18 08:42:41.688: INFO: New ReplicaSet "test-recreate-deployment-697fbf54bf" of Deployment "test-recreate-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-697fbf54bf,GenerateName:,Namespace:e2e-tests-deployment-f77wh,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-f77wh/replicasets/test-recreate-deployment-697fbf54bf,UID:08e4f71b-91a5-11e9-8cfd-00163e000a67,ResourceVersion:13556958,Generation:1,CreationTimestamp:2019-06-18 08:42:41 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment 07a392e0-91a5-11e9-8cfd-00163e000a67 0xc002088ad7 0xc002088ad8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun 18 08:42:41.688: INFO: All old ReplicaSets of Deployment "test-recreate-deployment":
+Jun 18 08:42:41.688: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-96c9d79c8,GenerateName:,Namespace:e2e-tests-deployment-f77wh,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-f77wh/replicasets/test-recreate-deployment-96c9d79c8,UID:07ae614c-91a5-11e9-8cfd-00163e000a67,ResourceVersion:13556949,Generation:2,CreationTimestamp:2019-06-18 08:42:39 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 96c9d79c8,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment 07a392e0-91a5-11e9-8cfd-00163e000a67 0xc002088b90 0xc002088b91}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 96c9d79c8,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 96c9d79c8,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun 18 08:42:41.690: INFO: Pod "test-recreate-deployment-697fbf54bf-9cpz9" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-697fbf54bf-9cpz9,GenerateName:test-recreate-deployment-697fbf54bf-,Namespace:e2e-tests-deployment-f77wh,SelfLink:/api/v1/namespaces/e2e-tests-deployment-f77wh/pods/test-recreate-deployment-697fbf54bf-9cpz9,UID:08e5769a-91a5-11e9-8cfd-00163e000a67,ResourceVersion:13556961,Generation:0,CreationTimestamp:2019-06-18 08:42:41 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-recreate-deployment-697fbf54bf 08e4f71b-91a5-11e9-8cfd-00163e000a67 0xc002089487 0xc002089488}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-5hjlw {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-5hjlw,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-5hjlw true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002089500} {node.kubernetes.io/unreachable Exists  NoExecute 
0xc002089520}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:42:41 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:42:41 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:42:41 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:42:41 +0000 UTC  }],Message:,Reason:,HostIP:192.168.2.155,PodIP:,StartTime:2019-06-18 08:42:41 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:42:41.690: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-deployment-f77wh" for this suite.
+Jun 18 08:42:50.516: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:42:50.574: INFO: namespace: e2e-tests-deployment-f77wh, resource: bindings, ignored listing per whitelist
+Jun 18 08:42:51.535: INFO: namespace e2e-tests-deployment-f77wh deletion completed in 9.84168269s
+
+• [SLOW TEST:12.708 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[k8s.io] Variable Expansion 
+  should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:42:51.535: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename var-expansion
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-var-expansion-97wfd
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test env composition
+Jun 18 08:42:51.732: INFO: Waiting up to 5m0s for pod "var-expansion-0ee89313-91a5-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-var-expansion-97wfd" to be "success or failure"
+Jun 18 08:42:51.735: INFO: Pod "var-expansion-0ee89313-91a5-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.296279ms
+Jun 18 08:42:53.737: INFO: Pod "var-expansion-0ee89313-91a5-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.005036608s
+STEP: Saw pod success
+Jun 18 08:42:53.737: INFO: Pod "var-expansion-0ee89313-91a5-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:42:53.739: INFO: Trying to get logs from node node5 pod var-expansion-0ee89313-91a5-11e9-bbf5-0e74dabf3615 container dapi-container: 
+STEP: delete the pod
+Jun 18 08:42:53.756: INFO: Waiting for pod var-expansion-0ee89313-91a5-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:42:53.757: INFO: Pod var-expansion-0ee89313-91a5-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:42:53.758: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-var-expansion-97wfd" for this suite.
+Jun 18 08:42:59.771: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:43:00.544: INFO: namespace: e2e-tests-var-expansion-97wfd, resource: bindings, ignored listing per whitelist
+Jun 18 08:43:00.828: INFO: namespace e2e-tests-var-expansion-97wfd deletion completed in 7.0674211s
+
+• [SLOW TEST:9.293 seconds]
+[k8s.io] Variable Expansion
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicaSet 
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:43:00.829: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename replicaset
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-replicaset-4cj28
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+Jun 18 08:43:01.709: INFO: Creating ReplicaSet my-hostname-basic-14dbd558-91a5-11e9-bbf5-0e74dabf3615
+Jun 18 08:43:01.721: INFO: Pod name my-hostname-basic-14dbd558-91a5-11e9-bbf5-0e74dabf3615: Found 0 pods out of 1
+Jun 18 08:43:06.724: INFO: Pod name my-hostname-basic-14dbd558-91a5-11e9-bbf5-0e74dabf3615: Found 1 pods out of 1
+Jun 18 08:43:06.724: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-14dbd558-91a5-11e9-bbf5-0e74dabf3615" is running
+Jun 18 08:43:06.726: INFO: Pod "my-hostname-basic-14dbd558-91a5-11e9-bbf5-0e74dabf3615-rbrpc" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-18 08:43:02 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-18 08:43:03 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-18 08:43:03 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-18 08:43:02 +0000 UTC Reason: Message:}])
+Jun 18 08:43:06.726: INFO: Trying to dial the pod
+Jun 18 08:43:11.735: INFO: Controller my-hostname-basic-14dbd558-91a5-11e9-bbf5-0e74dabf3615: Got expected result from replica 1 [my-hostname-basic-14dbd558-91a5-11e9-bbf5-0e74dabf3615-rbrpc]: "my-hostname-basic-14dbd558-91a5-11e9-bbf5-0e74dabf3615-rbrpc", 1 of 1 required successes so far
+[AfterEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:43:11.735: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-replicaset-4cj28" for this suite.
+Jun 18 08:43:19.746: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:43:19.768: INFO: namespace: e2e-tests-replicaset-4cj28, resource: bindings, ignored listing per whitelist
+Jun 18 08:43:20.563: INFO: namespace e2e-tests-replicaset-4cj28 deletion completed in 8.824855071s
+
+• [SLOW TEST:19.734 seconds]
+[sig-apps] ReplicaSet
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:43:20.563: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-t56j7
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name projected-configmap-test-volume-2033eb41-91a5-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume configMaps
+Jun 18 08:43:20.750: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-2034835e-91a5-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-t56j7" to be "success or failure"
+Jun 18 08:43:20.759: INFO: Pod "pod-projected-configmaps-2034835e-91a5-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 9.205798ms
+Jun 18 08:43:22.763: INFO: Pod "pod-projected-configmaps-2034835e-91a5-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013149551s
+Jun 18 08:43:24.766: INFO: Pod "pod-projected-configmaps-2034835e-91a5-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.0158773s
+STEP: Saw pod success
+Jun 18 08:43:24.766: INFO: Pod "pod-projected-configmaps-2034835e-91a5-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:43:24.767: INFO: Trying to get logs from node node5 pod pod-projected-configmaps-2034835e-91a5-11e9-bbf5-0e74dabf3615 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 18 08:43:24.780: INFO: Waiting for pod pod-projected-configmaps-2034835e-91a5-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:43:24.782: INFO: Pod pod-projected-configmaps-2034835e-91a5-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:43:24.783: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-t56j7" for this suite.
+Jun 18 08:43:32.795: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:43:32.906: INFO: namespace: e2e-tests-projected-t56j7, resource: bindings, ignored listing per whitelist
+Jun 18 08:43:33.518: INFO: namespace e2e-tests-projected-t56j7 deletion completed in 8.732724223s
+
+• [SLOW TEST:12.955 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-network] Services 
+  should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:43:33.518: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename services
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-services-69mj7
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:85
+[It] should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating service endpoint-test2 in namespace e2e-tests-services-69mj7
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-69mj7 to expose endpoints map[]
+Jun 18 08:43:33.860: INFO: Get endpoints failed (3.505014ms elapsed, ignoring for 5s): endpoints "endpoint-test2" not found
+Jun 18 08:43:35.511: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-69mj7 exposes endpoints map[] (1.655066345s elapsed)
+STEP: Creating pod pod1 in namespace e2e-tests-services-69mj7
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-69mj7 to expose endpoints map[pod1:[80]]
+Jun 18 08:43:37.550: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-69mj7 exposes endpoints map[pod1:[80]] (2.03180845s elapsed)
+STEP: Creating pod pod2 in namespace e2e-tests-services-69mj7
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-69mj7 to expose endpoints map[pod1:[80] pod2:[80]]
+Jun 18 08:43:40.603: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-69mj7 exposes endpoints map[pod1:[80] pod2:[80]] (3.046950775s elapsed)
+STEP: Deleting pod pod1 in namespace e2e-tests-services-69mj7
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-69mj7 to expose endpoints map[pod2:[80]]
+Jun 18 08:43:41.621: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-69mj7 exposes endpoints map[pod2:[80]] (1.012844919s elapsed)
+STEP: Deleting pod pod2 in namespace e2e-tests-services-69mj7
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-69mj7 to expose endpoints map[]
+Jun 18 08:43:42.630: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-69mj7 exposes endpoints map[] (1.005242785s elapsed)
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:43:42.647: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-services-69mj7" for this suite.
+Jun 18 08:44:08.666: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:44:09.568: INFO: namespace: e2e-tests-services-69mj7, resource: bindings, ignored listing per whitelist
+Jun 18 08:44:09.664: INFO: namespace e2e-tests-services-69mj7 deletion completed in 27.006983525s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:90
+
+• [SLOW TEST:36.146 seconds]
+[sig-network] Services
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should set DefaultMode on files [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:44:09.664: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-gx95f
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should set DefaultMode on files [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+Jun 18 08:44:10.684: INFO: Waiting up to 5m0s for pod "downwardapi-volume-3df7e548-91a5-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-gx95f" to be "success or failure"
+Jun 18 08:44:10.687: INFO: Pod "downwardapi-volume-3df7e548-91a5-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 3.115028ms
+Jun 18 08:44:12.695: INFO: Pod "downwardapi-volume-3df7e548-91a5-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01085103s
+Jun 18 08:44:14.698: INFO: Pod "downwardapi-volume-3df7e548-91a5-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013791904s
+STEP: Saw pod success
+Jun 18 08:44:14.698: INFO: Pod "downwardapi-volume-3df7e548-91a5-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:44:14.700: INFO: Trying to get logs from node node5 pod downwardapi-volume-3df7e548-91a5-11e9-bbf5-0e74dabf3615 container client-container: 
+STEP: delete the pod
+Jun 18 08:44:14.721: INFO: Waiting for pod downwardapi-volume-3df7e548-91a5-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:44:14.723: INFO: Pod downwardapi-volume-3df7e548-91a5-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:44:14.723: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-gx95f" for this suite.
+Jun 18 08:44:22.736: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:44:22.859: INFO: namespace: e2e-tests-downward-api-gx95f, resource: bindings, ignored listing per whitelist
+Jun 18 08:44:23.528: INFO: namespace e2e-tests-downward-api-gx95f deletion completed in 8.800718541s
+
+• [SLOW TEST:13.863 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should set DefaultMode on files [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:44:23.528: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-9wdds
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+Jun 18 08:44:24.545: INFO: Waiting up to 5m0s for pod "downwardapi-volume-46393a3a-91a5-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-9wdds" to be "success or failure"
+Jun 18 08:44:24.551: INFO: Pod "downwardapi-volume-46393a3a-91a5-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 6.159231ms
+Jun 18 08:44:26.554: INFO: Pod "downwardapi-volume-46393a3a-91a5-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009048263s
+STEP: Saw pod success
+Jun 18 08:44:26.554: INFO: Pod "downwardapi-volume-46393a3a-91a5-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:44:26.557: INFO: Trying to get logs from node node5 pod downwardapi-volume-46393a3a-91a5-11e9-bbf5-0e74dabf3615 container client-container: 
+STEP: delete the pod
+Jun 18 08:44:26.576: INFO: Waiting for pod downwardapi-volume-46393a3a-91a5-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:44:26.580: INFO: Pod downwardapi-volume-46393a3a-91a5-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:44:26.580: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-9wdds" for this suite.
+Jun 18 08:44:34.598: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:44:34.626: INFO: namespace: e2e-tests-projected-9wdds, resource: bindings, ignored listing per whitelist
+Jun 18 08:44:35.516: INFO: namespace e2e-tests-projected-9wdds deletion completed in 8.928313443s
+
+• [SLOW TEST:11.989 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:44:35.517: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-vthsn
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132
+[It] should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating pod
+Jun 18 08:44:38.672: INFO: Pod pod-hostip-4d744b5c-91a5-11e9-bbf5-0e74dabf3615 has hostIP: 192.168.2.155
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:44:38.672: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pods-vthsn" for this suite.
+Jun 18 08:45:04.685: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:45:04.750: INFO: namespace: e2e-tests-pods-vthsn, resource: bindings, ignored listing per whitelist
+Jun 18 08:45:05.519: INFO: namespace e2e-tests-pods-vthsn deletion completed in 26.843956149s
+
+• [SLOW TEST:30.003 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-node] ConfigMap 
+  should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-node] ConfigMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:45:05.519: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-p2qfh
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap e2e-tests-configmap-p2qfh/configmap-test-5ec38657-91a5-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume configMaps
+Jun 18 08:45:05.828: INFO: Waiting up to 5m0s for pod "pod-configmaps-5ec3f10c-91a5-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-configmap-p2qfh" to be "success or failure"
+Jun 18 08:45:06.522: INFO: Pod "pod-configmaps-5ec3f10c-91a5-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 693.812354ms
+Jun 18 08:45:08.525: INFO: Pod "pod-configmaps-5ec3f10c-91a5-11e9-bbf5-0e74dabf3615": Phase="Running", Reason="", readiness=true. Elapsed: 2.697008172s
+Jun 18 08:45:10.529: INFO: Pod "pod-configmaps-5ec3f10c-91a5-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.700281022s
+STEP: Saw pod success
+Jun 18 08:45:10.529: INFO: Pod "pod-configmaps-5ec3f10c-91a5-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:45:10.531: INFO: Trying to get logs from node node5 pod pod-configmaps-5ec3f10c-91a5-11e9-bbf5-0e74dabf3615 container env-test: 
+STEP: delete the pod
+Jun 18 08:45:10.547: INFO: Waiting for pod pod-configmaps-5ec3f10c-91a5-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:45:10.550: INFO: Pod pod-configmaps-5ec3f10c-91a5-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-node] ConfigMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:45:10.550: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-p2qfh" for this suite.
+Jun 18 08:45:18.568: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:45:18.661: INFO: namespace: e2e-tests-configmap-p2qfh, resource: bindings, ignored listing per whitelist
+Jun 18 08:45:19.519: INFO: namespace e2e-tests-configmap-p2qfh deletion completed in 8.966092197s
+
+• [SLOW TEST:14.000 seconds]
+[sig-node] ConfigMap
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:31
+  should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:45:19.519: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-6qpnf
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+Jun 18 08:45:19.713: INFO: Waiting up to 5m0s for pod "downwardapi-volume-671ccb43-91a5-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-6qpnf" to be "success or failure"
+Jun 18 08:45:19.716: INFO: Pod "downwardapi-volume-671ccb43-91a5-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.7027ms
+Jun 18 08:45:21.719: INFO: Pod "downwardapi-volume-671ccb43-91a5-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.00568801s
+Jun 18 08:45:23.722: INFO: Pod "downwardapi-volume-671ccb43-91a5-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.008741789s
+STEP: Saw pod success
+Jun 18 08:45:23.722: INFO: Pod "downwardapi-volume-671ccb43-91a5-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:45:23.724: INFO: Trying to get logs from node node5 pod downwardapi-volume-671ccb43-91a5-11e9-bbf5-0e74dabf3615 container client-container: 
+STEP: delete the pod
+Jun 18 08:45:23.737: INFO: Waiting for pod downwardapi-volume-671ccb43-91a5-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:45:23.741: INFO: Pod downwardapi-volume-671ccb43-91a5-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:45:23.742: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-6qpnf" for this suite.
+Jun 18 08:45:31.753: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:45:32.555: INFO: namespace: e2e-tests-projected-6qpnf, resource: bindings, ignored listing per whitelist
+Jun 18 08:45:32.599: INFO: namespace e2e-tests-projected-6qpnf deletion completed in 8.854128673s
+
+• [SLOW TEST:13.079 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should have monotonically increasing restart count [Slow][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:45:32.599: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-269hv
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48
+[It] should have monotonically increasing restart count [Slow][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod liveness-http in namespace e2e-tests-container-probe-269hv
+Jun 18 08:45:38.777: INFO: Started pod liveness-http in namespace e2e-tests-container-probe-269hv
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun 18 08:45:38.779: INFO: Initial restart count of pod liveness-http is 0
+Jun 18 08:45:49.551: INFO: Restart count of pod e2e-tests-container-probe-269hv/liveness-http is now 1 (10.772495315s elapsed)
+Jun 18 08:46:11.581: INFO: Restart count of pod e2e-tests-container-probe-269hv/liveness-http is now 2 (32.801821114s elapsed)
+Jun 18 08:46:29.676: INFO: Restart count of pod e2e-tests-container-probe-269hv/liveness-http is now 3 (50.897099227s elapsed)
+Jun 18 08:46:51.517: INFO: Restart count of pod e2e-tests-container-probe-269hv/liveness-http is now 4 (1m12.738135114s elapsed)
+Jun 18 08:47:51.774: INFO: Restart count of pod e2e-tests-container-probe-269hv/liveness-http is now 5 (2m12.995212796s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:47:51.790: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-probe-269hv" for this suite.
+Jun 18 08:47:59.804: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:47:59.916: INFO: namespace: e2e-tests-container-probe-269hv, resource: bindings, ignored listing per whitelist
+Jun 18 08:48:00.116: INFO: namespace e2e-tests-container-probe-269hv deletion completed in 8.32078415s
+
+• [SLOW TEST:147.517 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should have monotonically increasing restart count [Slow][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should retry creating failed daemon pods [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:48:00.116: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename daemonsets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-daemonsets-r5j2d
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should retry creating failed daemon pods [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a simple DaemonSet "daemon-set"
+STEP: Check that daemon pods launch on every node of the cluster.
+Jun 18 08:48:00.700: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:00.703: INFO: Number of nodes with available pods: 0
+Jun 18 08:48:00.703: INFO: Node node1 is running more than one daemon pod
+Jun 18 08:48:01.709: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:01.712: INFO: Number of nodes with available pods: 0
+Jun 18 08:48:01.713: INFO: Node node1 is running more than one daemon pod
+Jun 18 08:48:02.708: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:02.710: INFO: Number of nodes with available pods: 0
+Jun 18 08:48:02.710: INFO: Node node1 is running more than one daemon pod
+Jun 18 08:48:03.707: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:03.710: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:03.710: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:04.707: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:04.710: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:04.710: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:05.708: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:05.711: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:05.711: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:06.707: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:06.711: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:06.711: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:07.709: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:07.713: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:07.713: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:08.707: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:08.710: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:08.710: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:09.708: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:09.711: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:09.711: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:10.706: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:10.713: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:10.713: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:11.707: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:11.709: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:11.709: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:12.709: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:12.717: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:12.717: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:13.707: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:13.710: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:13.710: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:14.708: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:14.713: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:14.713: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:15.707: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:15.710: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:15.710: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:16.707: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:16.711: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:16.711: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:17.718: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:17.755: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:17.755: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:18.708: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:18.711: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:18.711: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:19.708: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:19.710: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:19.710: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:20.706: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:20.709: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:20.709: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:21.709: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:21.713: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:21.713: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:22.708: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:22.711: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:22.711: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:23.707: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:23.710: INFO: Number of nodes with available pods: 5
+Jun 18 08:48:23.710: INFO: Number of running nodes: 5, number of available pods: 5
+STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.
+Jun 18 08:48:23.722: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:23.738: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:23.738: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:24.742: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:24.747: INFO: Number of nodes with available pods: 4
+Jun 18 08:48:24.747: INFO: Node node4 is running more than one daemon pod
+Jun 18 08:48:25.742: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 08:48:25.745: INFO: Number of nodes with available pods: 5
+Jun 18 08:48:25.745: INFO: Number of running nodes: 5, number of available pods: 5
+STEP: Wait for the failed daemon pod to be completely deleted.
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace e2e-tests-daemonsets-r5j2d, will wait for the garbage collector to delete the pods
+Jun 18 08:48:25.807: INFO: Deleting DaemonSet.extensions daemon-set took: 5.310826ms
+Jun 18 08:48:25.907: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.187973ms
+Jun 18 08:48:39.809: INFO: Number of nodes with available pods: 0
+Jun 18 08:48:39.809: INFO: Number of running nodes: 0, number of available pods: 0
+Jun 18 08:48:39.811: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-r5j2d/daemonsets","resourceVersion":"13559147"},"items":null}
+
+Jun 18 08:48:39.813: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-r5j2d/pods","resourceVersion":"13559147"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:48:39.827: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-daemonsets-r5j2d" for this suite.
+Jun 18 08:48:46.526: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:48:46.632: INFO: namespace: e2e-tests-daemonsets-r5j2d, resource: bindings, ignored listing per whitelist
+Jun 18 08:48:47.545: INFO: namespace e2e-tests-daemonsets-r5j2d deletion completed in 7.714771488s
+
+• [SLOW TEST:47.429 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should retry creating failed daemon pods [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for node-pod communication: udp [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:48:47.545: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pod-network-test-s9hln
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for node-pod communication: udp [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-s9hln
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun 18 08:48:49.627: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun 18 08:49:15.724: INFO: ExecWithOptions {Command:[/bin/sh -c echo 'hostName' | nc -w 1 -u 171.171.166.171 8081 | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-s9hln PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:49:15.724: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:49:16.793: INFO: Found all expected endpoints: [netserver-0]
+Jun 18 08:49:16.795: INFO: ExecWithOptions {Command:[/bin/sh -c echo 'hostName' | nc -w 1 -u 171.171.104.3 8081 | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-s9hln PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:49:16.795: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:49:18.518: INFO: Found all expected endpoints: [netserver-1]
+Jun 18 08:49:18.523: INFO: ExecWithOptions {Command:[/bin/sh -c echo 'hostName' | nc -w 1 -u 171.171.3.86 8081 | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-s9hln PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:49:18.523: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:49:19.620: INFO: Found all expected endpoints: [netserver-2]
+Jun 18 08:49:19.625: INFO: ExecWithOptions {Command:[/bin/sh -c echo 'hostName' | nc -w 1 -u 171.171.33.150 8081 | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-s9hln PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:49:19.625: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:49:20.696: INFO: Found all expected endpoints: [netserver-3]
+Jun 18 08:49:20.699: INFO: ExecWithOptions {Command:[/bin/sh -c echo 'hostName' | nc -w 1 -u 171.171.135.15 8081 | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-s9hln PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 18 08:49:20.699: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+Jun 18 08:49:21.765: INFO: Found all expected endpoints: [netserver-4]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:49:21.765: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pod-network-test-s9hln" for this suite.
+Jun 18 08:49:50.552: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:49:51.642: INFO: namespace: e2e-tests-pod-network-test-s9hln, resource: bindings, ignored listing per whitelist
+Jun 18 08:49:51.680: INFO: namespace e2e-tests-pod-network-test-s9hln deletion completed in 29.90765821s
+
+• [SLOW TEST:64.134 seconds]
+[sig-network] Networking
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for node-pod communication: udp [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  volume on default medium should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:49:51.680: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-tlrn8
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] volume on default medium should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir volume type on node default medium
+Jun 18 08:49:52.669: INFO: Waiting up to 5m0s for pod "pod-09ce3bcd-91a6-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-tlrn8" to be "success or failure"
+Jun 18 08:49:52.674: INFO: Pod "pod-09ce3bcd-91a6-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 5.276835ms
+Jun 18 08:49:54.680: INFO: Pod "pod-09ce3bcd-91a6-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.011188693s
+STEP: Saw pod success
+Jun 18 08:49:54.680: INFO: Pod "pod-09ce3bcd-91a6-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:49:54.682: INFO: Trying to get logs from node node5 pod pod-09ce3bcd-91a6-11e9-bbf5-0e74dabf3615 container test-container: 
+STEP: delete the pod
+Jun 18 08:49:54.698: INFO: Waiting for pod pod-09ce3bcd-91a6-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:49:54.699: INFO: Pod pod-09ce3bcd-91a6-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:49:54.699: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-tlrn8" for this suite.
+Jun 18 08:50:02.712: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:50:03.609: INFO: namespace: e2e-tests-emptydir-tlrn8, resource: bindings, ignored listing per whitelist
+Jun 18 08:50:03.643: INFO: namespace e2e-tests-emptydir-tlrn8 deletion completed in 8.94093136s
+
+• [SLOW TEST:11.963 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  volume on default medium should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command in a pod 
+  should print the output to logs [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:50:03.643: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubelet-test-jc5n6
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should print the output to logs [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:50:09.542: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubelet-test-jc5n6" for this suite.
+Jun 18 08:50:53.567: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:50:54.509: INFO: namespace: e2e-tests-kubelet-test-jc5n6, resource: bindings, ignored listing per whitelist
+Jun 18 08:50:54.599: INFO: namespace e2e-tests-kubelet-test-jc5n6 deletion completed in 45.049931186s
+
+• [SLOW TEST:50.956 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when scheduling a busybox command in a pod
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:40
+    should print the output to logs [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:50:54.599: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-hkvdd
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name cm-test-opt-del-2fdebf34-91a6-11e9-bbf5-0e74dabf3615
+STEP: Creating configMap with name cm-test-opt-upd-2fdebf87-91a6-11e9-bbf5-0e74dabf3615
+STEP: Creating the pod
+STEP: Deleting configmap cm-test-opt-del-2fdebf34-91a6-11e9-bbf5-0e74dabf3615
+STEP: Updating configmap cm-test-opt-upd-2fdebf87-91a6-11e9-bbf5-0e74dabf3615
+STEP: Creating configMap with name cm-test-opt-create-2fdebfab-91a6-11e9-bbf5-0e74dabf3615
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:51:05.581: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-hkvdd" for this suite.
+Jun 18 08:51:33.726: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:51:34.660: INFO: namespace: e2e-tests-configmap-hkvdd, resource: bindings, ignored listing per whitelist
+Jun 18 08:51:35.645: INFO: namespace e2e-tests-configmap-hkvdd deletion completed in 30.040592034s
+
+• [SLOW TEST:41.045 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicationController 
+  should release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:51:35.645: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename replication-controller
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-replication-controller-kxmbm
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Given a ReplicationController is created
+STEP: When the matched label of one of its pods change
+Jun 18 08:51:37.544: INFO: Pod name pod-release: Found 0 pods out of 1
+Jun 18 08:51:42.547: INFO: Pod name pod-release: Found 1 pods out of 1
+STEP: Then the pod is released
+[AfterEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:51:43.620: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-replication-controller-kxmbm" for this suite.
+Jun 18 08:51:51.645: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:51:51.717: INFO: namespace: e2e-tests-replication-controller-kxmbm, resource: bindings, ignored listing per whitelist
+Jun 18 08:51:52.512: INFO: namespace e2e-tests-replication-controller-kxmbm deletion completed in 8.877283419s
+
+• [SLOW TEST:16.867 seconds]
+[sig-apps] ReplicationController
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:51:52.512: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-bz5vd
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+Jun 18 08:51:52.695: INFO: Waiting up to 5m0s for pod "downwardapi-volume-515936c3-91a6-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-downward-api-bz5vd" to be "success or failure"
+Jun 18 08:51:52.696: INFO: Pod "downwardapi-volume-515936c3-91a6-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 1.619556ms
+Jun 18 08:51:54.700: INFO: Pod "downwardapi-volume-515936c3-91a6-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.004728682s
+STEP: Saw pod success
+Jun 18 08:51:54.700: INFO: Pod "downwardapi-volume-515936c3-91a6-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:51:54.702: INFO: Trying to get logs from node node5 pod downwardapi-volume-515936c3-91a6-11e9-bbf5-0e74dabf3615 container client-container: 
+STEP: delete the pod
+Jun 18 08:51:54.717: INFO: Waiting for pod downwardapi-volume-515936c3-91a6-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:51:54.719: INFO: Pod downwardapi-volume-515936c3-91a6-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:51:54.719: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-bz5vd" for this suite.
+Jun 18 08:52:02.731: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:52:02.834: INFO: namespace: e2e-tests-downward-api-bz5vd, resource: bindings, ignored listing per whitelist
+Jun 18 08:52:03.051: INFO: namespace e2e-tests-downward-api-bz5vd deletion completed in 8.328334201s
+
+• [SLOW TEST:10.539 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:52:03.051: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-w9bg2
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+Jun 18 08:52:03.652: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"57e08728-91a6-11e9-8cfd-00163e000a67", Controller:(*bool)(0xc0015452aa), BlockOwnerDeletion:(*bool)(0xc0015452ab)}}
+Jun 18 08:52:03.655: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"57dfc239-91a6-11e9-8cfd-00163e000a67", Controller:(*bool)(0xc001fe9b3a), BlockOwnerDeletion:(*bool)(0xc001fe9b3b)}}
+Jun 18 08:52:03.658: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"57e01c81-91a6-11e9-8cfd-00163e000a67", Controller:(*bool)(0xc001545582), BlockOwnerDeletion:(*bool)(0xc001545583)}}
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:52:08.666: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-w9bg2" for this suite.
+Jun 18 08:52:16.685: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:52:16.744: INFO: namespace: e2e-tests-gc-w9bg2, resource: bindings, ignored listing per whitelist
+Jun 18 08:52:17.557: INFO: namespace e2e-tests-gc-w9bg2 deletion completed in 8.886604381s
+
+• [SLOW TEST:14.506 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:52:17.557: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-5854d
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-volume-60bd1ede-91a6-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume configMaps
+Jun 18 08:52:18.577: INFO: Waiting up to 5m0s for pod "pod-configmaps-60be9b92-91a6-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-configmap-5854d" to be "success or failure"
+Jun 18 08:52:18.587: INFO: Pod "pod-configmaps-60be9b92-91a6-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 10.137115ms
+Jun 18 08:52:20.590: INFO: Pod "pod-configmaps-60be9b92-91a6-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.012993667s
+STEP: Saw pod success
+Jun 18 08:52:20.590: INFO: Pod "pod-configmaps-60be9b92-91a6-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:52:20.593: INFO: Trying to get logs from node node5 pod pod-configmaps-60be9b92-91a6-11e9-bbf5-0e74dabf3615 container configmap-volume-test: 
+STEP: delete the pod
+Jun 18 08:52:20.618: INFO: Waiting for pod pod-configmaps-60be9b92-91a6-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:52:20.620: INFO: Pod pod-configmaps-60be9b92-91a6-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:52:20.620: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-5854d" for this suite.
+Jun 18 08:52:28.648: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:52:28.738: INFO: namespace: e2e-tests-configmap-5854d, resource: bindings, ignored listing per whitelist
+Jun 18 08:52:28.968: INFO: namespace e2e-tests-configmap-5854d deletion completed in 8.342332361s
+
+• [SLOW TEST:11.411 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should set mode on item file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:52:28.968: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-fdnnd
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should set mode on item file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+Jun 18 08:52:29.686: INFO: Waiting up to 5m0s for pod "downwardapi-volume-67658d9a-91a6-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-fdnnd" to be "success or failure"
+Jun 18 08:52:29.688: INFO: Pod "downwardapi-volume-67658d9a-91a6-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.20246ms
+Jun 18 08:52:31.690: INFO: Pod "downwardapi-volume-67658d9a-91a6-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.004633353s
+Jun 18 08:52:33.693: INFO: Pod "downwardapi-volume-67658d9a-91a6-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.007015711s
+STEP: Saw pod success
+Jun 18 08:52:33.693: INFO: Pod "downwardapi-volume-67658d9a-91a6-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:52:33.696: INFO: Trying to get logs from node node5 pod downwardapi-volume-67658d9a-91a6-11e9-bbf5-0e74dabf3615 container client-container: 
+STEP: delete the pod
+Jun 18 08:52:33.708: INFO: Waiting for pod downwardapi-volume-67658d9a-91a6-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:52:33.710: INFO: Pod downwardapi-volume-67658d9a-91a6-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:52:33.710: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-fdnnd" for this suite.
+Jun 18 08:52:41.722: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:52:42.567: INFO: namespace: e2e-tests-projected-fdnnd, resource: bindings, ignored listing per whitelist
+Jun 18 08:52:42.760: INFO: namespace e2e-tests-projected-fdnnd deletion completed in 9.046928466s
+
+• [SLOW TEST:13.792 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should set mode on item file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:52:42.760: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename statefulset
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-statefulset-mgwpt
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace e2e-tests-statefulset-mgwpt
+[It] should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a new StatefulSet
+Jun 18 08:52:43.701: INFO: Found 0 stateful pods, waiting for 3
+Jun 18 08:52:53.705: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true
+Jun 18 08:52:53.705: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true
+Jun 18 08:52:53.705: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true
+Jun 18 08:52:53.712: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-mgwpt ss2-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 18 08:52:54.599: INFO: stderr: ""
+Jun 18 08:52:54.599: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 18 08:52:54.599: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+STEP: Updating StatefulSet template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine
+Jun 18 08:53:04.629: INFO: Updating stateful set ss2
+STEP: Creating a new revision
+STEP: Updating Pods in reverse ordinal order
+Jun 18 08:53:14.647: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-mgwpt ss2-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun 18 08:53:14.796: INFO: stderr: ""
+Jun 18 08:53:14.796: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun 18 08:53:14.796: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun 18 08:53:35.538: INFO: Waiting for StatefulSet e2e-tests-statefulset-mgwpt/ss2 to complete update
+Jun 18 08:53:35.538: INFO: Waiting for Pod e2e-tests-statefulset-mgwpt/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666
+STEP: Rolling back to a previous revision
+Jun 18 08:53:45.544: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-mgwpt ss2-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 18 08:53:45.693: INFO: stderr: ""
+Jun 18 08:53:45.694: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 18 08:53:45.694: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun 18 08:53:55.721: INFO: Updating stateful set ss2
+STEP: Rolling back update in reverse ordinal order
+Jun 18 08:54:05.734: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 exec --namespace=e2e-tests-statefulset-mgwpt ss2-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun 18 08:54:05.888: INFO: stderr: ""
+Jun 18 08:54:05.888: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun 18 08:54:05.888: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun 18 08:54:26.551: INFO: Waiting for StatefulSet e2e-tests-statefulset-mgwpt/ss2 to complete update
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+Jun 18 08:54:36.557: INFO: Deleting all statefulset in ns e2e-tests-statefulset-mgwpt
+Jun 18 08:54:36.560: INFO: Scaling statefulset ss2 to 0
+Jun 18 08:54:56.575: INFO: Waiting for statefulset status.replicas updated to 0
+Jun 18 08:54:56.577: INFO: Deleting statefulset ss2
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:54:56.594: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-statefulset-mgwpt" for this suite.
+Jun 18 08:55:04.616: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:55:04.685: INFO: namespace: e2e-tests-statefulset-mgwpt, resource: bindings, ignored listing per whitelist
+Jun 18 08:55:06.618: INFO: namespace e2e-tests-statefulset-mgwpt deletion completed in 10.019633276s
+
+• [SLOW TEST:143.858 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should perform rolling updates and roll backs of template modifications [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-apps] Deployment 
+  deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:55:06.619: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename deployment
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-deployment-5rnsh
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+Jun 18 08:55:07.525: INFO: Pod name cleanup-pod: Found 0 pods out of 1
+Jun 18 08:55:12.528: INFO: Pod name cleanup-pod: Found 1 pods out of 1
+STEP: ensuring each pod is running
+Jun 18 08:55:12.529: INFO: Creating deployment test-cleanup-deployment
+STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+Jun 18 08:55:12.543: INFO: Deployment "test-cleanup-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment,GenerateName:,Namespace:e2e-tests-deployment-5rnsh,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-5rnsh/deployments/test-cleanup-deployment,UID:c8767e36-91a6-11e9-8cfd-00163e000a67,ResourceVersion:13561812,Generation:1,CreationTimestamp:2019-06-18 08:55:12 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:0,Replicas:0,UpdatedReplicas:0,AvailableReplicas:0,UnavailableReplicas:0,Conditions:[],ReadyReplicas:0,CollisionCount:nil,},}
+
+Jun 18 08:55:12.545: INFO: New ReplicaSet of Deployment "test-cleanup-deployment" is nil.
+Jun 18 08:55:12.545: INFO: All old ReplicaSets of Deployment "test-cleanup-deployment":
+Jun 18 08:55:12.545: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-controller,GenerateName:,Namespace:e2e-tests-deployment-5rnsh,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-5rnsh/replicasets/test-cleanup-controller,UID:c578869e-91a6-11e9-8cfd-00163e000a67,ResourceVersion:13561813,Generation:1,CreationTimestamp:2019-06-18 08:55:07 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 Deployment test-cleanup-deployment c8767e36-91a6-11e9-8cfd-00163e000a67 0xc002672237 0xc002672238}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},}
+Jun 18 08:55:12.550: INFO: Pod "test-cleanup-controller-7rrxb" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-controller-7rrxb,GenerateName:test-cleanup-controller-,Namespace:e2e-tests-deployment-5rnsh,SelfLink:/api/v1/namespaces/e2e-tests-deployment-5rnsh/pods/test-cleanup-controller-7rrxb,UID:c57b7299-91a6-11e9-8cfd-00163e000a67,ResourceVersion:13561793,Generation:0,CreationTimestamp:2019-06-18 08:55:07 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-cleanup-controller c578869e-91a6-11e9-8cfd-00163e000a67 0xc0022393a7 0xc0022393a8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-lwsp6 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-lwsp6,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-lwsp6 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node5,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002239420} {node.kubernetes.io/unreachable Exists  NoExecute 
0xc002239440}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:55:07 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:55:08 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:55:08 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-18 08:55:07 +0000 UTC  }],Message:,Reason:,HostIP:192.168.2.155,PodIP:171.171.33.157,StartTime:2019-06-18 08:55:07 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-18 08:55:08 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://07e99258752bfa72bd4b86050960326aeef2315198018f6a97d3f1c36c5a9355}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:55:12.550: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-deployment-5rnsh" for this suite.
+Jun 18 08:55:20.598: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:55:21.616: INFO: namespace: e2e-tests-deployment-5rnsh, resource: bindings, ignored listing per whitelist
+Jun 18 08:55:22.550: INFO: namespace e2e-tests-deployment-5rnsh deletion completed in 9.990220614s
+
+• [SLOW TEST:15.932 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-network] Services 
+  should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:55:22.550: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename services
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-services-j9frk
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:85
+[It] should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating service multi-endpoint-test in namespace e2e-tests-services-j9frk
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-j9frk to expose endpoints map[]
+Jun 18 08:55:22.758: INFO: Get endpoints failed (3.685824ms elapsed, ignoring for 5s): endpoints "multi-endpoint-test" not found
+Jun 18 08:55:23.761: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-j9frk exposes endpoints map[] (1.006491522s elapsed)
+STEP: Creating pod pod1 in namespace e2e-tests-services-j9frk
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-j9frk to expose endpoints map[pod1:[100]]
+Jun 18 08:55:25.784: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-j9frk exposes endpoints map[pod1:[100]] (2.019116952s elapsed)
+STEP: Creating pod pod2 in namespace e2e-tests-services-j9frk
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-j9frk to expose endpoints map[pod1:[100] pod2:[101]]
+Jun 18 08:55:27.806: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-j9frk exposes endpoints map[pod1:[100] pod2:[101]] (2.019281382s elapsed)
+STEP: Deleting pod pod1 in namespace e2e-tests-services-j9frk
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-j9frk to expose endpoints map[pod2:[101]]
+Jun 18 08:55:29.530: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-j9frk exposes endpoints map[pod2:[101]] (1.720866614s elapsed)
+STEP: Deleting pod pod2 in namespace e2e-tests-services-j9frk
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-j9frk to expose endpoints map[]
+Jun 18 08:55:29.541: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-j9frk exposes endpoints map[] (4.609621ms elapsed)
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:55:29.564: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-services-j9frk" for this suite.
+Jun 18 08:55:55.588: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:55:57.678: INFO: namespace: e2e-tests-services-j9frk, resource: bindings, ignored listing per whitelist
+Jun 18 08:55:58.567: INFO: namespace e2e-tests-services-j9frk deletion completed in 28.996699887s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:90
+
+• [SLOW TEST:36.017 seconds]
+[sig-network] Services
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:55:58.567: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-dbwrq
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-e477235e-91a6-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume secrets
+Jun 18 08:55:59.527: INFO: Waiting up to 5m0s for pod "pod-secrets-e47834e4-91a6-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-secrets-dbwrq" to be "success or failure"
+Jun 18 08:55:59.544: INFO: Pod "pod-secrets-e47834e4-91a6-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 16.675129ms
+Jun 18 08:56:01.550: INFO: Pod "pod-secrets-e47834e4-91a6-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.022500807s
+STEP: Saw pod success
+Jun 18 08:56:01.550: INFO: Pod "pod-secrets-e47834e4-91a6-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:56:01.553: INFO: Trying to get logs from node node5 pod pod-secrets-e47834e4-91a6-11e9-bbf5-0e74dabf3615 container secret-volume-test: 
+STEP: delete the pod
+Jun 18 08:56:01.575: INFO: Waiting for pod pod-secrets-e47834e4-91a6-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:56:01.580: INFO: Pod pod-secrets-e47834e4-91a6-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:56:01.580: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-dbwrq" for this suite.
+Jun 18 08:56:11.597: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:56:12.555: INFO: namespace: e2e-tests-secrets-dbwrq, resource: bindings, ignored listing per whitelist
+Jun 18 08:56:12.598: INFO: namespace e2e-tests-secrets-dbwrq deletion completed in 11.012754703s
+
+• [SLOW TEST:14.031 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:56:12.598: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-cvj6n
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name projected-configmap-test-volume-eccf38f5-91a6-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume configMaps
+Jun 18 08:56:13.524: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-ecd04d2a-91a6-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-cvj6n" to be "success or failure"
+Jun 18 08:56:13.530: INFO: Pod "pod-projected-configmaps-ecd04d2a-91a6-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 6.032037ms
+Jun 18 08:56:15.552: INFO: Pod "pod-projected-configmaps-ecd04d2a-91a6-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027630207s
+Jun 18 08:56:17.555: INFO: Pod "pod-projected-configmaps-ecd04d2a-91a6-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.030392931s
+STEP: Saw pod success
+Jun 18 08:56:17.555: INFO: Pod "pod-projected-configmaps-ecd04d2a-91a6-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:56:17.556: INFO: Trying to get logs from node node5 pod pod-projected-configmaps-ecd04d2a-91a6-11e9-bbf5-0e74dabf3615 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 18 08:56:17.574: INFO: Waiting for pod pod-projected-configmaps-ecd04d2a-91a6-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:56:17.576: INFO: Pod pod-projected-configmaps-ecd04d2a-91a6-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:56:17.576: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-cvj6n" for this suite.
+Jun 18 08:56:27.596: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:56:27.688: INFO: namespace: e2e-tests-projected-cvj6n, resource: bindings, ignored listing per whitelist
+Jun 18 08:56:28.562: INFO: namespace e2e-tests-projected-cvj6n deletion completed in 10.98247821s
+
+• [SLOW TEST:15.964 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:56:28.563: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-mqz77
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132
+[It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+Jun 18 08:56:28.739: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:56:34.564: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pods-mqz77" for this suite.
+Jun 18 08:57:18.624: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:57:19.632: INFO: namespace: e2e-tests-pods-mqz77, resource: bindings, ignored listing per whitelist
+Jun 18 08:57:19.641: INFO: namespace e2e-tests-pods-mqz77 deletion completed in 45.04883894s
+
+• [SLOW TEST:51.079 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:57:19.642: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-vc2zq
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-map-14d89679-91a7-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume secrets
+Jun 18 08:57:20.689: INFO: Waiting up to 5m0s for pod "pod-secrets-14d91699-91a7-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-secrets-vc2zq" to be "success or failure"
+Jun 18 08:57:20.690: INFO: Pod "pod-secrets-14d91699-91a7-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 1.315155ms
+Jun 18 08:57:22.692: INFO: Pod "pod-secrets-14d91699-91a7-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.003769374s
+Jun 18 08:57:24.695: INFO: Pod "pod-secrets-14d91699-91a7-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.006814324s
+STEP: Saw pod success
+Jun 18 08:57:24.695: INFO: Pod "pod-secrets-14d91699-91a7-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:57:24.697: INFO: Trying to get logs from node node5 pod pod-secrets-14d91699-91a7-11e9-bbf5-0e74dabf3615 container secret-volume-test: 
+STEP: delete the pod
+Jun 18 08:57:24.712: INFO: Waiting for pod pod-secrets-14d91699-91a7-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:57:24.717: INFO: Pod pod-secrets-14d91699-91a7-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:57:24.717: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-vc2zq" for this suite.
+Jun 18 08:57:33.558: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:57:33.643: INFO: namespace: e2e-tests-secrets-vc2zq, resource: bindings, ignored listing per whitelist
+Jun 18 08:57:34.621: INFO: namespace e2e-tests-secrets-vc2zq deletion completed in 9.901002948s
+
+• [SLOW TEST:14.979 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:57:34.621: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-g9w48
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0666 on node default medium
+Jun 18 08:57:35.734: INFO: Waiting up to 5m0s for pod "pod-1dd00df7-91a7-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-g9w48" to be "success or failure"
+Jun 18 08:57:35.737: INFO: Pod "pod-1dd00df7-91a7-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.627665ms
+Jun 18 08:57:37.740: INFO: Pod "pod-1dd00df7-91a7-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.00553511s
+STEP: Saw pod success
+Jun 18 08:57:37.740: INFO: Pod "pod-1dd00df7-91a7-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 08:57:37.742: INFO: Trying to get logs from node node5 pod pod-1dd00df7-91a7-11e9-bbf5-0e74dabf3615 container test-container: 
+STEP: delete the pod
+Jun 18 08:57:37.756: INFO: Waiting for pod pod-1dd00df7-91a7-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 08:57:37.758: INFO: Pod pod-1dd00df7-91a7-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:57:37.758: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-g9w48" for this suite.
+Jun 18 08:57:44.521: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:57:44.559: INFO: namespace: e2e-tests-emptydir-g9w48, resource: bindings, ignored listing per whitelist
+Jun 18 08:57:44.841: INFO: namespace e2e-tests-emptydir-g9w48 deletion completed in 7.074118631s
+
+• [SLOW TEST:10.220 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Proxy server 
+  should support proxy with --port 0  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:57:44.841: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-vp5tc
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should support proxy with --port 0  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: starting the proxy server
+Jun 18 08:57:45.512: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-656024001 proxy -p 0 --disable-filter'
+STEP: curling proxy /api/ output
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:57:45.579: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-vp5tc" for this suite.
+Jun 18 08:57:53.595: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:57:53.650: INFO: namespace: e2e-tests-kubectl-vp5tc, resource: bindings, ignored listing per whitelist
+Jun 18 08:57:54.519: INFO: namespace e2e-tests-kubectl-vp5tc deletion completed in 8.934777329s
+
+• [SLOW TEST:9.677 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Proxy server
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should support proxy with --port 0  [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:57:54.519: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-jbbk7
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48
+[It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod liveness-http in namespace e2e-tests-container-probe-jbbk7
+Jun 18 08:57:56.716: INFO: Started pod liveness-http in namespace e2e-tests-container-probe-jbbk7
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun 18 08:57:56.719: INFO: Initial restart count of pod liveness-http is 0
+Jun 18 08:58:19.585: INFO: Restart count of pod e2e-tests-container-probe-jbbk7/liveness-http is now 1 (22.865801849s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:58:19.615: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-probe-jbbk7" for this suite.
+Jun 18 08:58:27.652: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:58:27.694: INFO: namespace: e2e-tests-container-probe-jbbk7, resource: bindings, ignored listing per whitelist
+Jun 18 08:58:28.541: INFO: namespace e2e-tests-container-probe-jbbk7 deletion completed in 8.917794055s
+
+• [SLOW TEST:34.022 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:58:28.541: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-q7lqb
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating the pod
+Jun 18 08:58:31.515: INFO: Successfully updated pod "annotationupdate3d6596a2-91a7-11e9-bbf5-0e74dabf3615"
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 08:58:33.534: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-q7lqb" for this suite.
+Jun 18 08:58:57.563: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 08:58:57.598: INFO: namespace: e2e-tests-downward-api-q7lqb, resource: bindings, ignored listing per whitelist
+Jun 18 08:58:57.874: INFO: namespace e2e-tests-downward-api-q7lqb deletion completed in 24.324332556s
+
+• [SLOW TEST:29.334 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Guestbook application 
+  should create and stop a working application  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 08:58:57.875: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-wjvln
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should create and stop a working application  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating all guestbook components
+Jun 18 08:58:58.653: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: redis-slave
+  labels:
+    app: redis
+    role: slave
+    tier: backend
+spec:
+  ports:
+  - port: 6379
+  selector:
+    app: redis
+    role: slave
+    tier: backend
+
+Jun 18 08:58:58.653: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 08:59:00.528: INFO: stderr: ""
+Jun 18 08:59:00.528: INFO: stdout: "service/redis-slave created\n"
+Jun 18 08:59:00.529: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: redis-master
+  labels:
+    app: redis
+    role: master
+    tier: backend
+spec:
+  ports:
+  - port: 6379
+    targetPort: 6379
+  selector:
+    app: redis
+    role: master
+    tier: backend
+
+Jun 18 08:59:00.529: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 08:59:00.738: INFO: stderr: ""
+Jun 18 08:59:00.739: INFO: stdout: "service/redis-master created\n"
+Jun 18 08:59:00.739: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: frontend
+  labels:
+    app: guestbook
+    tier: frontend
+spec:
+  # if your cluster supports it, uncomment the following to automatically create
+  # an external load-balanced IP for the frontend service.
+  # type: LoadBalancer
+  ports:
+  - port: 80
+  selector:
+    app: guestbook
+    tier: frontend
+
+Jun 18 08:59:00.739: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 08:59:01.691: INFO: stderr: ""
+Jun 18 08:59:01.691: INFO: stdout: "service/frontend created\n"
+Jun 18 08:59:01.691: INFO: apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: frontend
+spec:
+  replicas: 3
+  template:
+    metadata:
+      labels:
+        app: guestbook
+        tier: frontend
+    spec:
+      containers:
+      - name: php-redis
+        image: reg.kpaas.io/google-samples/gb-frontend:v6
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access environment variables to find service host
+          # info, comment out the 'value: dns' line above, and uncomment the
+          # line below:
+          # value: env
+        ports:
+        - containerPort: 80
+
+Jun 18 08:59:01.691: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 08:59:02.612: INFO: stderr: ""
+Jun 18 08:59:02.612: INFO: stdout: "deployment.extensions/frontend created\n"
+Jun 18 08:59:02.612: INFO: apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: redis-master
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: redis
+        role: master
+        tier: backend
+    spec:
+      containers:
+      - name: master
+        image: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        ports:
+        - containerPort: 6379
+
+Jun 18 08:59:02.612: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 08:59:03.539: INFO: stderr: ""
+Jun 18 08:59:03.539: INFO: stdout: "deployment.extensions/redis-master created\n"
+Jun 18 08:59:03.539: INFO: apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: redis-slave
+spec:
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: redis
+        role: slave
+        tier: backend
+    spec:
+      containers:
+      - name: slave
+        image: reg.kpaas.io/google-samples/gb-redisslave:v3
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access an environment variable to find the master
+          # service's host, comment out the 'value: dns' line above, and
+          # uncomment the line below:
+          # value: env
+        ports:
+        - containerPort: 6379
+
+Jun 18 08:59:03.539: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 08:59:03.743: INFO: stderr: ""
+Jun 18 08:59:03.743: INFO: stdout: "deployment.extensions/redis-slave created\n"
+STEP: validating guestbook app
+Jun 18 08:59:03.743: INFO: Waiting for all frontend pods to be Running.
+Jun 18 09:00:48.798: INFO: Waiting for frontend to serve content.
+Jun 18 09:00:49.650: INFO: Trying to add a new entry to the guestbook.
+Jun 18 09:00:50.513: INFO: Verifying that added entry can be retrieved.
+STEP: using delete to clean up resources
+Jun 18 09:00:50.540: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 09:00:50.677: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 18 09:00:50.677: INFO: stdout: "service \"redis-slave\" force deleted\n"
+STEP: using delete to clean up resources
+Jun 18 09:00:50.678: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 09:00:54.571: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 18 09:00:54.571: INFO: stdout: "service \"redis-master\" force deleted\n"
+STEP: using delete to clean up resources
+Jun 18 09:00:54.571: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 09:00:55.659: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 18 09:00:55.659: INFO: stdout: "service \"frontend\" force deleted\n"
+STEP: using delete to clean up resources
+Jun 18 09:00:55.659: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 09:00:56.591: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 18 09:00:56.591: INFO: stdout: "deployment.extensions \"frontend\" force deleted\n"
+STEP: using delete to clean up resources
+Jun 18 09:00:56.591: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 09:00:57.548: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 18 09:00:57.548: INFO: stdout: "deployment.extensions \"redis-master\" force deleted\n"
+STEP: using delete to clean up resources
+Jun 18 09:00:57.548: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wjvln'
+Jun 18 09:00:57.672: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 18 09:00:57.672: INFO: stdout: "deployment.extensions \"redis-slave\" force deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:00:57.672: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-wjvln" for this suite.
+Jun 18 09:01:41.695: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:01:45.541: INFO: namespace: e2e-tests-kubectl-wjvln, resource: bindings, ignored listing per whitelist
+Jun 18 09:01:45.541: INFO: namespace e2e-tests-kubectl-wjvln deletion completed in 47.861125399s
+
+• [SLOW TEST:167.666 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Guestbook application
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should create and stop a working application  [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[k8s.io] Probing container 
+  should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:01:45.541: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-wzc4l
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48
+[It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod liveness-exec in namespace e2e-tests-container-probe-wzc4l
+Jun 18 09:01:50.631: INFO: Started pod liveness-exec in namespace e2e-tests-container-probe-wzc4l
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun 18 09:01:50.653: INFO: Initial restart count of pod liveness-exec is 0
+Jun 18 09:02:36.756: INFO: Restart count of pod e2e-tests-container-probe-wzc4l/liveness-exec is now 1 (46.10368835s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:02:36.763: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-probe-wzc4l" for this suite.
+Jun 18 09:02:43.533: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:02:43.572: INFO: namespace: e2e-tests-container-probe-wzc4l, resource: bindings, ignored listing per whitelist
+Jun 18 09:02:44.562: INFO: namespace e2e-tests-container-probe-wzc4l deletion completed in 7.794382653s
+
+• [SLOW TEST:59.022 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with downward pod [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:02:44.562: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename subpath
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-subpath-qkt99
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with downward pod [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod pod-subpath-test-downwardapi-nkhz
+STEP: Creating a pod to test atomic-volume-subpath
+Jun 18 09:02:46.548: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-nkhz" in namespace "e2e-tests-subpath-qkt99" to be "success or failure"
+Jun 18 09:02:46.563: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Pending", Reason="", readiness=false. Elapsed: 14.907731ms
+Jun 18 09:02:48.566: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017886807s
+Jun 18 09:02:50.569: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Running", Reason="", readiness=false. Elapsed: 4.020925599s
+Jun 18 09:02:52.575: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Running", Reason="", readiness=false. Elapsed: 6.027103521s
+Jun 18 09:02:54.578: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Running", Reason="", readiness=false. Elapsed: 8.029618315s
+Jun 18 09:02:56.580: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Running", Reason="", readiness=false. Elapsed: 10.032344378s
+Jun 18 09:02:58.583: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Running", Reason="", readiness=false. Elapsed: 12.034540914s
+Jun 18 09:03:00.588: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Running", Reason="", readiness=false. Elapsed: 14.039759612s
+Jun 18 09:03:02.591: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Running", Reason="", readiness=false. Elapsed: 16.042829482s
+Jun 18 09:03:04.594: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Running", Reason="", readiness=false. Elapsed: 18.045902737s
+Jun 18 09:03:06.597: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Running", Reason="", readiness=false. Elapsed: 20.048544205s
+Jun 18 09:03:08.600: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Running", Reason="", readiness=false. Elapsed: 22.051984077s
+Jun 18 09:03:10.603: INFO: Pod "pod-subpath-test-downwardapi-nkhz": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.055193254s
+STEP: Saw pod success
+Jun 18 09:03:10.603: INFO: Pod "pod-subpath-test-downwardapi-nkhz" satisfied condition "success or failure"
+Jun 18 09:03:10.606: INFO: Trying to get logs from node node5 pod pod-subpath-test-downwardapi-nkhz container test-container-subpath-downwardapi-nkhz: 
+STEP: delete the pod
+Jun 18 09:03:10.619: INFO: Waiting for pod pod-subpath-test-downwardapi-nkhz to disappear
+Jun 18 09:03:10.621: INFO: Pod pod-subpath-test-downwardapi-nkhz no longer exists
+STEP: Deleting pod pod-subpath-test-downwardapi-nkhz
+Jun 18 09:03:10.621: INFO: Deleting pod "pod-subpath-test-downwardapi-nkhz" in namespace "e2e-tests-subpath-qkt99"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:03:10.623: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-subpath-qkt99" for this suite.
+Jun 18 09:03:18.638: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:03:19.632: INFO: namespace: e2e-tests-subpath-qkt99, resource: bindings, ignored listing per whitelist
+Jun 18 09:03:19.672: INFO: namespace e2e-tests-subpath-qkt99 deletion completed in 9.045007736s
+
+• [SLOW TEST:35.110 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with downward pod [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Update Demo 
+  should scale a replication controller  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:03:19.672: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-q4d2z
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Update Demo
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:295
+[It] should scale a replication controller  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating a replication controller
+Jun 18 09:03:20.658: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 create -f - --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:21.637: INFO: stderr: ""
+Jun 18 09:03:21.637: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun 18 09:03:21.637: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:22.522: INFO: stderr: ""
+Jun 18 09:03:22.522: INFO: stdout: "update-demo-nautilus-6bvg8 update-demo-nautilus-jclv5 "
+Jun 18 09:03:22.522: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-6bvg8 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:22.609: INFO: stderr: ""
+Jun 18 09:03:22.609: INFO: stdout: ""
+Jun 18 09:03:22.609: INFO: update-demo-nautilus-6bvg8 is created but not running
+Jun 18 09:03:27.609: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:27.692: INFO: stderr: ""
+Jun 18 09:03:27.692: INFO: stdout: "update-demo-nautilus-6bvg8 update-demo-nautilus-jclv5 "
+Jun 18 09:03:27.692: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-6bvg8 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:28.520: INFO: stderr: ""
+Jun 18 09:03:28.520: INFO: stdout: "true"
+Jun 18 09:03:28.520: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-6bvg8 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:28.600: INFO: stderr: ""
+Jun 18 09:03:28.600: INFO: stdout: "reg.kpaas.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 18 09:03:28.600: INFO: validating pod update-demo-nautilus-6bvg8
+Jun 18 09:03:28.604: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 18 09:03:28.604: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 18 09:03:28.604: INFO: update-demo-nautilus-6bvg8 is verified up and running
+Jun 18 09:03:28.604: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-jclv5 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:28.683: INFO: stderr: ""
+Jun 18 09:03:28.683: INFO: stdout: "true"
+Jun 18 09:03:28.683: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-jclv5 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:28.761: INFO: stderr: ""
+Jun 18 09:03:28.761: INFO: stdout: "reg.kpaas.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 18 09:03:28.761: INFO: validating pod update-demo-nautilus-jclv5
+Jun 18 09:03:28.765: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 18 09:03:28.765: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 18 09:03:28.765: INFO: update-demo-nautilus-jclv5 is verified up and running
+STEP: scaling down the replication controller
+Jun 18 09:03:28.766: INFO: scanned /root for discovery docs: 
+Jun 18 09:03:28.766: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 scale rc update-demo-nautilus --replicas=1 --timeout=5m --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:29.868: INFO: stderr: ""
+Jun 18 09:03:29.868: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun 18 09:03:29.868: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:29.946: INFO: stderr: ""
+Jun 18 09:03:29.946: INFO: stdout: "update-demo-nautilus-6bvg8 update-demo-nautilus-jclv5 "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+Jun 18 09:03:34.946: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:35.539: INFO: stderr: ""
+Jun 18 09:03:35.539: INFO: stdout: "update-demo-nautilus-6bvg8 update-demo-nautilus-jclv5 "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+Jun 18 09:03:40.540: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:40.621: INFO: stderr: ""
+Jun 18 09:03:40.621: INFO: stdout: "update-demo-nautilus-6bvg8 update-demo-nautilus-jclv5 "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+Jun 18 09:03:45.621: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:45.696: INFO: stderr: ""
+Jun 18 09:03:45.696: INFO: stdout: "update-demo-nautilus-jclv5 "
+Jun 18 09:03:45.696: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-jclv5 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:45.772: INFO: stderr: ""
+Jun 18 09:03:45.772: INFO: stdout: "true"
+Jun 18 09:03:45.772: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-jclv5 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:45.853: INFO: stderr: ""
+Jun 18 09:03:45.853: INFO: stdout: "reg.kpaas.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 18 09:03:45.853: INFO: validating pod update-demo-nautilus-jclv5
+Jun 18 09:03:45.856: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 18 09:03:45.856: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 18 09:03:45.856: INFO: update-demo-nautilus-jclv5 is verified up and running
+STEP: scaling up the replication controller
+Jun 18 09:03:45.857: INFO: scanned /root for discovery docs: 
+Jun 18 09:03:45.857: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 scale rc update-demo-nautilus --replicas=2 --timeout=5m --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:47.521: INFO: stderr: ""
+Jun 18 09:03:47.521: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun 18 09:03:47.521: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:47.601: INFO: stderr: ""
+Jun 18 09:03:47.601: INFO: stdout: "update-demo-nautilus-fb9s4 update-demo-nautilus-jclv5 "
+Jun 18 09:03:47.601: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-fb9s4 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:47.681: INFO: stderr: ""
+Jun 18 09:03:47.682: INFO: stdout: "true"
+Jun 18 09:03:47.682: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-fb9s4 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:47.758: INFO: stderr: ""
+Jun 18 09:03:47.758: INFO: stdout: "reg.kpaas.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 18 09:03:47.758: INFO: validating pod update-demo-nautilus-fb9s4
+Jun 18 09:03:48.514: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 18 09:03:48.514: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 18 09:03:48.514: INFO: update-demo-nautilus-fb9s4 is verified up and running
+Jun 18 09:03:48.514: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-jclv5 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:48.615: INFO: stderr: ""
+Jun 18 09:03:48.615: INFO: stdout: "true"
+Jun 18 09:03:48.615: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods update-demo-nautilus-jclv5 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:48.694: INFO: stderr: ""
+Jun 18 09:03:48.694: INFO: stdout: "reg.kpaas.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 18 09:03:48.694: INFO: validating pod update-demo-nautilus-jclv5
+Jun 18 09:03:48.698: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 18 09:03:48.698: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 18 09:03:48.698: INFO: update-demo-nautilus-jclv5 is verified up and running
+STEP: using delete to clean up resources
+Jun 18 09:03:48.698: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:49.544: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 18 09:03:49.544: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n"
+Jun 18 09:03:49.544: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get rc,svc -l name=update-demo --no-headers --namespace=e2e-tests-kubectl-q4d2z'
+Jun 18 09:03:49.638: INFO: stderr: "No resources found.\n"
+Jun 18 09:03:49.639: INFO: stdout: ""
+Jun 18 09:03:49.639: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-656024001 get pods -l name=update-demo --namespace=e2e-tests-kubectl-q4d2z -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun 18 09:03:49.751: INFO: stderr: ""
+Jun 18 09:03:49.751: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:03:49.751: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-q4d2z" for this suite.
+Jun 18 09:04:15.765: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:04:15.855: INFO: namespace: e2e-tests-kubectl-q4d2z, resource: bindings, ignored listing per whitelist
+Jun 18 09:04:16.568: INFO: namespace e2e-tests-kubectl-q4d2z deletion completed in 26.812973065s
+
+• [SLOW TEST:56.896 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Update Demo
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should scale a replication controller  [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:04:16.568: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-fsj2w
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating the pod
+Jun 18 09:04:22.517: INFO: Successfully updated pod "annotationupdate0cd7c59c-91a8-11e9-bbf5-0e74dabf3615"
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:04:26.541: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-fsj2w" for this suite.
+Jun 18 09:04:50.555: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:04:50.664: INFO: namespace: e2e-tests-projected-fsj2w, resource: bindings, ignored listing per whitelist
+Jun 18 09:04:50.866: INFO: namespace e2e-tests-projected-fsj2w deletion completed in 24.321312475s
+
+• [SLOW TEST:34.298 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Namespaces [Serial] 
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:04:50.867: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename namespaces
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-namespaces-ssj72
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a test namespace
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-nsdeletetest-c2z7p
+STEP: Waiting for a default service account to be provisioned in namespace
+STEP: Creating a pod in the namespace
+STEP: Waiting for the pod to have running status
+STEP: Creating an uninitialized pod in the namespace
+STEP: Deleting the namespace
+STEP: Waiting for the namespace to be removed.
+Jun 18 09:05:04.568: INFO: error from create uninitialized namespace: Internal error occurred: object deleted while waiting for creation
+STEP: Recreating the namespace
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-nsdeletetest-fhvqg
+STEP: Verifying there are no pods in the namespace
+[AfterEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:05:22.518: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-namespaces-ssj72" for this suite.
+Jun 18 09:05:30.536: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:05:30.629: INFO: namespace: e2e-tests-namespaces-ssj72, resource: bindings, ignored listing per whitelist
+Jun 18 09:05:30.847: INFO: namespace e2e-tests-namespaces-ssj72 deletion completed in 8.321354768s
+STEP: Destroying namespace "e2e-tests-nsdeletetest-c2z7p" for this suite.
+Jun 18 09:05:30.851: INFO: Namespace e2e-tests-nsdeletetest-c2z7p was already deleted
+STEP: Destroying namespace "e2e-tests-nsdeletetest-fhvqg" for this suite.
+Jun 18 09:05:38.859: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:05:39.584: INFO: namespace: e2e-tests-nsdeletetest-fhvqg, resource: bindings, ignored listing per whitelist
+Jun 18 09:05:39.623: INFO: namespace e2e-tests-nsdeletetest-fhvqg deletion completed in 8.772236523s
+
+• [SLOW TEST:48.756 seconds]
+[sig-api-machinery] Namespaces [Serial]
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-storage] HostPath 
+  should give a volume the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] HostPath
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:05:39.623: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename hostpath
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-hostpath-pq9xw
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] HostPath
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:37
+[It] should give a volume the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test hostPath mode
+Jun 18 09:05:40.526: INFO: Waiting up to 5m0s for pod "pod-host-path-test" in namespace "e2e-tests-hostpath-pq9xw" to be "success or failure"
+Jun 18 09:05:40.530: INFO: Pod "pod-host-path-test": Phase="Pending", Reason="", readiness=false. Elapsed: 4.638516ms
+Jun 18 09:05:42.554: INFO: Pod "pod-host-path-test": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.027832416s
+STEP: Saw pod success
+Jun 18 09:05:42.554: INFO: Pod "pod-host-path-test" satisfied condition "success or failure"
+Jun 18 09:05:42.561: INFO: Trying to get logs from node node5 pod pod-host-path-test container test-container-1: 
+STEP: delete the pod
+Jun 18 09:05:42.599: INFO: Waiting for pod pod-host-path-test to disappear
+Jun 18 09:05:42.603: INFO: Pod pod-host-path-test no longer exists
+[AfterEach] [sig-storage] HostPath
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:05:42.603: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-hostpath-pq9xw" for this suite.
+Jun 18 09:05:54.627: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:05:56.616: INFO: namespace: e2e-tests-hostpath-pq9xw, resource: bindings, ignored listing per whitelist
+Jun 18 09:05:56.632: INFO: namespace e2e-tests-hostpath-pq9xw deletion completed in 14.025554268s
+
+• [SLOW TEST:17.008 seconds]
+[sig-storage] HostPath
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:34
+  should give a volume the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:05:56.632: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-skvmf
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0777 on node default medium
+Jun 18 09:05:57.689: INFO: Waiting up to 5m0s for pod "pod-4900b8c8-91a8-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-emptydir-skvmf" to be "success or failure"
+Jun 18 09:05:57.696: INFO: Pod "pod-4900b8c8-91a8-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 7.095645ms
+Jun 18 09:05:59.699: INFO: Pod "pod-4900b8c8-91a8-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.010020356s
+Jun 18 09:06:01.702: INFO: Pod "pod-4900b8c8-91a8-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012698094s
+STEP: Saw pod success
+Jun 18 09:06:01.702: INFO: Pod "pod-4900b8c8-91a8-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 09:06:01.704: INFO: Trying to get logs from node node5 pod pod-4900b8c8-91a8-11e9-bbf5-0e74dabf3615 container test-container: 
+STEP: delete the pod
+Jun 18 09:06:01.721: INFO: Waiting for pod pod-4900b8c8-91a8-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 09:06:01.723: INFO: Pod pod-4900b8c8-91a8-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:06:01.723: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-skvmf" for this suite.
+Jun 18 09:06:14.564: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:06:15.594: INFO: namespace: e2e-tests-emptydir-skvmf, resource: bindings, ignored listing per whitelist
+Jun 18 09:06:15.622: INFO: namespace e2e-tests-emptydir-skvmf deletion completed in 13.085608173s
+
+• [SLOW TEST:18.990 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[k8s.io] Variable Expansion 
+  should allow substituting values in a container's command [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:06:15.622: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename var-expansion
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-var-expansion-lwrb2
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow substituting values in a container's command [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test substitution in container's command
+Jun 18 09:06:16.521: INFO: Waiting up to 5m0s for pod "var-expansion-543a31ea-91a8-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-var-expansion-lwrb2" to be "success or failure"
+Jun 18 09:06:16.526: INFO: Pod "var-expansion-543a31ea-91a8-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 5.079399ms
+Jun 18 09:06:18.541: INFO: Pod "var-expansion-543a31ea-91a8-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020361909s
+Jun 18 09:06:20.569: INFO: Pod "var-expansion-543a31ea-91a8-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 4.048540913s
+Jun 18 09:06:22.572: INFO: Pod "var-expansion-543a31ea-91a8-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.051249537s
+STEP: Saw pod success
+Jun 18 09:06:22.572: INFO: Pod "var-expansion-543a31ea-91a8-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 09:06:22.574: INFO: Trying to get logs from node node5 pod var-expansion-543a31ea-91a8-11e9-bbf5-0e74dabf3615 container dapi-container: 
+STEP: delete the pod
+Jun 18 09:06:22.590: INFO: Waiting for pod var-expansion-543a31ea-91a8-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 09:06:22.594: INFO: Pod var-expansion-543a31ea-91a8-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:06:22.594: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-var-expansion-lwrb2" for this suite.
+Jun 18 09:06:32.652: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:06:33.625: INFO: namespace: e2e-tests-var-expansion-lwrb2, resource: bindings, ignored listing per whitelist
+Jun 18 09:06:34.583: INFO: namespace e2e-tests-var-expansion-lwrb2 deletion completed in 11.985237987s
+
+• [SLOW TEST:18.961 seconds]
+[k8s.io] Variable Expansion
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should allow substituting values in a container's command [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a read only busybox container 
+  should not write to root filesystem [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:06:34.583: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubelet-test-z7rvt
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should not write to root filesystem [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:06:42.551: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubelet-test-z7rvt" for this suite.
+Jun 18 09:07:32.565: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:07:32.603: INFO: namespace: e2e-tests-kubelet-test-z7rvt, resource: bindings, ignored listing per whitelist
+Jun 18 09:07:32.887: INFO: namespace e2e-tests-kubelet-test-z7rvt deletion completed in 50.331065325s
+
+• [SLOW TEST:58.303 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when scheduling a read only busybox container
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:186
+    should not write to root filesystem [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSS
+------------------------------
+[sig-apps] ReplicationController 
+  should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:07:32.887: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename replication-controller
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-replication-controller-kmggl
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Given a Pod with a 'name' label pod-adoption is created
+STEP: When a replication controller with a matching selector is created
+STEP: Then the orphan pod is adopted
+[AfterEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:07:46.692: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-replication-controller-kmggl" for this suite.
+Jun 18 09:08:12.703: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:08:13.597: INFO: namespace: e2e-tests-replication-controller-kmggl, resource: bindings, ignored listing per whitelist
+Jun 18 09:08:13.608: INFO: namespace e2e-tests-replication-controller-kmggl deletion completed in 26.913499943s
+
+• [SLOW TEST:40.722 seconds]
+[sig-apps] ReplicationController
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:08:13.609: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename containers
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-containers-mmfgx
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test use defaults
+Jun 18 09:08:14.531: INFO: Waiting up to 5m0s for pod "client-containers-9a8fa3ce-91a8-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-containers-mmfgx" to be "success or failure"
+Jun 18 09:08:14.539: INFO: Pod "client-containers-9a8fa3ce-91a8-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 8.151978ms
+Jun 18 09:08:16.542: INFO: Pod "client-containers-9a8fa3ce-91a8-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.010389598s
+STEP: Saw pod success
+Jun 18 09:08:16.542: INFO: Pod "client-containers-9a8fa3ce-91a8-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 09:08:16.548: INFO: Trying to get logs from node node5 pod client-containers-9a8fa3ce-91a8-11e9-bbf5-0e74dabf3615 container test-container: 
+STEP: delete the pod
+Jun 18 09:08:16.577: INFO: Waiting for pod client-containers-9a8fa3ce-91a8-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 09:08:16.581: INFO: Pod client-containers-9a8fa3ce-91a8-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:08:16.581: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-containers-mmfgx" for this suite.
+Jun 18 09:08:24.597: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:08:24.630: INFO: namespace: e2e-tests-containers-mmfgx, resource: bindings, ignored listing per whitelist
+Jun 18 09:08:25.525: INFO: namespace e2e-tests-containers-mmfgx deletion completed in 8.939520895s
+
+• [SLOW TEST:11.916 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:08:25.525: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename daemonsets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-daemonsets-m4267
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+Jun 18 09:08:26.615: INFO: Creating simple daemon set daemon-set
+STEP: Check that daemon pods launch on every node of the cluster.
+Jun 18 09:08:26.621: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:26.626: INFO: Number of nodes with available pods: 0
+Jun 18 09:08:26.626: INFO: Node node1 is running more than one daemon pod
+Jun 18 09:08:27.633: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:27.637: INFO: Number of nodes with available pods: 0
+Jun 18 09:08:27.637: INFO: Node node1 is running more than one daemon pod
+Jun 18 09:08:28.631: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:28.633: INFO: Number of nodes with available pods: 2
+Jun 18 09:08:28.633: INFO: Node node1 is running more than one daemon pod
+Jun 18 09:08:29.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:29.633: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:29.633: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:30.634: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:30.638: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:30.638: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:31.635: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:31.640: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:31.640: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:32.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:32.633: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:32.633: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:33.629: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:33.632: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:33.632: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:34.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:34.632: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:34.632: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:35.629: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:35.632: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:35.632: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:36.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:36.632: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:36.632: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:37.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:37.634: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:37.634: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:38.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:38.633: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:38.633: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:39.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:39.633: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:39.633: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:40.629: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:40.632: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:40.632: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:41.629: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:41.633: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:41.633: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:42.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:42.633: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:42.633: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:43.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:43.633: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:43.633: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:44.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:44.633: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:44.633: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:45.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:45.633: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:45.633: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:46.631: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:46.634: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:46.634: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:47.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:47.634: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:47.634: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:48.632: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:48.635: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:48.635: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:49.633: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:49.636: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:49.636: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:50.630: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:50.633: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:50.633: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:51.631: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:51.634: INFO: Number of nodes with available pods: 4
+Jun 18 09:08:51.634: INFO: Node node4 is running more than one daemon pod
+Jun 18 09:08:52.633: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:52.636: INFO: Number of nodes with available pods: 5
+Jun 18 09:08:52.636: INFO: Number of running nodes: 5, number of available pods: 5
+STEP: Update daemon pods image.
+STEP: Check that daemon pods images are updated.
+Jun 18 09:08:52.658: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:52.658: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:52.658: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:52.658: INFO: Wrong image for pod: daemon-set-kdhf8. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:52.658: INFO: Wrong image for pod: daemon-set-scpvg. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:52.672: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:53.675: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:53.675: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:53.675: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:53.675: INFO: Wrong image for pod: daemon-set-kdhf8. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:53.675: INFO: Wrong image for pod: daemon-set-scpvg. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:53.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:54.675: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:54.675: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:54.675: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:54.675: INFO: Wrong image for pod: daemon-set-kdhf8. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:54.675: INFO: Wrong image for pod: daemon-set-scpvg. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:54.675: INFO: Pod daemon-set-scpvg is not available
+Jun 18 09:08:54.680: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:55.677: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:55.677: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:55.677: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:55.677: INFO: Wrong image for pod: daemon-set-kdhf8. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:55.677: INFO: Pod daemon-set-wp66j is not available
+Jun 18 09:08:55.680: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:56.675: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:56.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:56.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:56.676: INFO: Wrong image for pod: daemon-set-kdhf8. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:56.676: INFO: Pod daemon-set-wp66j is not available
+Jun 18 09:08:56.681: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:57.675: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:57.675: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:57.675: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:57.675: INFO: Wrong image for pod: daemon-set-kdhf8. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:57.675: INFO: Pod daemon-set-wp66j is not available
+Jun 18 09:08:57.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:58.675: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:58.675: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:58.675: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:58.675: INFO: Wrong image for pod: daemon-set-kdhf8. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:58.675: INFO: Pod daemon-set-wp66j is not available
+Jun 18 09:08:58.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:08:59.677: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:59.677: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:59.677: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:59.677: INFO: Wrong image for pod: daemon-set-kdhf8. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:08:59.677: INFO: Pod daemon-set-kdhf8 is not available
+Jun 18 09:08:59.680: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:00.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:00.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:00.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:00.676: INFO: Pod daemon-set-p74wc is not available
+Jun 18 09:09:00.680: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:01.675: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:01.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:01.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:01.676: INFO: Pod daemon-set-p74wc is not available
+Jun 18 09:09:01.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:02.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:02.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:02.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:02.676: INFO: Pod daemon-set-p74wc is not available
+Jun 18 09:09:02.680: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:03.675: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:03.675: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:03.675: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:03.675: INFO: Pod daemon-set-p74wc is not available
+Jun 18 09:09:03.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:04.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:04.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:04.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:04.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:06.543: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:06.543: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:06.543: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:06.560: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:06.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:06.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:06.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:06.676: INFO: Pod daemon-set-f48fc is not available
+Jun 18 09:09:06.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:07.675: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:07.675: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:07.675: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:07.675: INFO: Pod daemon-set-f48fc is not available
+Jun 18 09:09:07.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:08.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:08.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:08.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:08.676: INFO: Pod daemon-set-f48fc is not available
+Jun 18 09:09:08.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:09.677: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:09.677: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:09.677: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:09.677: INFO: Pod daemon-set-f48fc is not available
+Jun 18 09:09:09.681: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:10.677: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:10.677: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:10.677: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:10.677: INFO: Pod daemon-set-f48fc is not available
+Jun 18 09:09:10.682: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:11.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:11.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:11.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:11.676: INFO: Pod daemon-set-f48fc is not available
+Jun 18 09:09:11.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:12.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:12.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:12.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:12.676: INFO: Pod daemon-set-f48fc is not available
+Jun 18 09:09:12.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:13.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:13.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:13.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:13.676: INFO: Pod daemon-set-f48fc is not available
+Jun 18 09:09:13.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:14.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:14.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:14.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:14.676: INFO: Pod daemon-set-f48fc is not available
+Jun 18 09:09:14.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:15.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:15.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:15.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:15.676: INFO: Pod daemon-set-f48fc is not available
+Jun 18 09:09:15.680: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:16.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:16.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:16.676: INFO: Wrong image for pod: daemon-set-f48fc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:16.676: INFO: Pod daemon-set-f48fc is not available
+Jun 18 09:09:16.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:17.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:17.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:17.676: INFO: Pod daemon-set-pn7js is not available
+Jun 18 09:09:17.686: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:18.678: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:18.678: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:18.678: INFO: Pod daemon-set-pn7js is not available
+Jun 18 09:09:18.682: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:19.676: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:19.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:19.676: INFO: Pod daemon-set-pn7js is not available
+Jun 18 09:09:19.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:20.675: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:20.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:20.676: INFO: Pod daemon-set-pn7js is not available
+Jun 18 09:09:20.680: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:21.675: INFO: Wrong image for pod: daemon-set-8tr4z. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:21.675: INFO: Pod daemon-set-8tr4z is not available
+Jun 18 09:09:21.675: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:21.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:22.675: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:22.675: INFO: Pod daemon-set-j6q9b is not available
+Jun 18 09:09:22.679: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:23.676: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:23.676: INFO: Pod daemon-set-j6q9b is not available
+Jun 18 09:09:23.680: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:24.675: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:24.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:25.681: INFO: Wrong image for pod: daemon-set-dvrsc. Expected: reg.kpaas.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 18 09:09:25.681: INFO: Pod daemon-set-dvrsc is not available
+Jun 18 09:09:25.700: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:26.675: INFO: Pod daemon-set-nbq8j is not available
+Jun 18 09:09:26.678: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+STEP: Check that daemon pods are still running on every node of the cluster.
+Jun 18 09:09:26.681: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:26.684: INFO: Number of nodes with available pods: 4
+Jun 18 09:09:26.684: INFO: Node node3 is running more than one daemon pod
+Jun 18 09:09:27.688: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:27.691: INFO: Number of nodes with available pods: 4
+Jun 18 09:09:27.691: INFO: Node node3 is running more than one daemon pod
+Jun 18 09:09:28.687: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:28.690: INFO: Number of nodes with available pods: 4
+Jun 18 09:09:28.690: INFO: Node node3 is running more than one daemon pod
+Jun 18 09:09:29.688: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:29.690: INFO: Number of nodes with available pods: 4
+Jun 18 09:09:29.690: INFO: Node node3 is running more than one daemon pod
+Jun 18 09:09:30.688: INFO: DaemonSet pods can't tolerate node master1 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 18 09:09:30.690: INFO: Number of nodes with available pods: 5
+Jun 18 09:09:30.690: INFO: Number of running nodes: 5, number of available pods: 5
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace e2e-tests-daemonsets-m4267, will wait for the garbage collector to delete the pods
+Jun 18 09:09:30.759: INFO: Deleting DaemonSet.extensions daemon-set took: 5.807348ms
+Jun 18 09:09:30.860: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.325441ms
+Jun 18 09:09:40.562: INFO: Number of nodes with available pods: 0
+Jun 18 09:09:40.562: INFO: Number of running nodes: 0, number of available pods: 0
+Jun 18 09:09:40.565: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-m4267/daemonsets","resourceVersion":"13567054"},"items":null}
+
+Jun 18 09:09:40.567: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-m4267/pods","resourceVersion":"13567054"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:09:40.580: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-daemonsets-m4267" for this suite.
+Jun 18 09:09:50.596: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:09:50.678: INFO: namespace: e2e-tests-daemonsets-m4267, resource: bindings, ignored listing per whitelist
+Jun 18 09:09:50.937: INFO: namespace e2e-tests-daemonsets-m4267 deletion completed in 10.353107048s
+
+• [SLOW TEST:85.412 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:09:50.937: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-plknd
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-upd-d474cd0d-91a8-11e9-bbf5-0e74dabf3615
+STEP: Creating the pod
+STEP: Waiting for pod with text data
+STEP: Waiting for pod with binary data
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:09:53.696: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-plknd" for this suite.
+Jun 18 09:10:21.711: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:10:22.561: INFO: namespace: e2e-tests-configmap-plknd, resource: bindings, ignored listing per whitelist
+Jun 18 09:10:22.612: INFO: namespace e2e-tests-configmap-plknd deletion completed in 28.912544698s
+
+• [SLOW TEST:31.675 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:10:22.612: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename sched-pred
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-sched-pred-t856c
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79
+Jun 18 09:10:23.512: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+Jun 18 09:10:23.521: INFO: Waiting for terminating namespaces to be deleted...
+Jun 18 09:10:23.524: INFO: 
+Logging pods the kubelet thinks is on node node1 before test
+Jun 18 09:10:23.534: INFO: qce-postgres-stolon-sentinel-b6bcb4448-gch5x from qce started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: mongorsdata-operator-54b67c6cc5-fh4r4 from qiniu-mongors started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container mongors-operator ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: prometheus-operator-prometheus-node-exporter-jd657 from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container node-exporter ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: qce-etcd-5665b647b-cjlnd from qce started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container qce-etcd-etcd ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-r97x2 from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: qce-authzhook-deploy-75cbd8bc4b-wd28x from qce started at 2019-05-14 10:16:10 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container qce-authzhook ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: prometheus-prometheus-operator-prometheus-0 from kube-system started at 2019-06-15 09:23:36 +0000 UTC (3 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container prometheus ready: true, restart count 1
+Jun 18 09:10:23.534: INFO: 	Container prometheus-config-reloader ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: 	Container rules-configmap-reloader ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: redisdata-operator-cdd96dd96-mxcw6 from qiniu-redis started at 2019-06-04 11:39:27 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container redis-operator ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: csirbd-demo-pod from default started at 2019-05-14 08:50:23 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container web-server ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: qce-postgres-stolon-keeper-1 from qce started at 2019-05-14 09:40:52 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: alert-dispatcher-58d448f9c9-t5npr from kube-system started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container alert-dispatcher ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: qce-mongo-deploy-65f555f54f-2td5v from qce started at 2019-06-04 11:39:26 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container qce-mongo ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: kube-proxy-4kq5g from kube-system started at 2019-05-14 05:39:01 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container kube-proxy ready: true, restart count 2
+Jun 18 09:10:23.534: INFO: calico-node-87wc8 from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container calico-node ready: true, restart count 2
+Jun 18 09:10:23.534: INFO: 	Container install-cni ready: true, restart count 2
+Jun 18 09:10:23.534: INFO: csi-cephfs-ceph-csi-cephfs-provisioner-0 from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: 	Container csi-provisioner ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: alertmanager-prometheus-operator-alertmanager-1 from kube-system started at 2019-06-15 05:36:24 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container alertmanager ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: 	Container config-reloader ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-2smn4 from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: logkit-poc-dk8x2 from kube-system started at 2019-05-17 03:17:51 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.534: INFO: 	Container logkit-poc ready: true, restart count 0
+Jun 18 09:10:23.534: INFO: 
+Logging pods the kubelet thinks is on node node2 before test
+Jun 18 09:10:23.555: INFO: logkit-poc-cgpj8 from kube-system started at 2019-05-17 03:17:51 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container logkit-poc ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: kube-proxy-hm6bg from kube-system started at 2019-05-14 05:39:31 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: alert-controller-568fb6794d-f9vhm from kube-system started at 2019-06-14 01:20:22 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container alert-controller ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: qce-postgres-stolon-keeper-0 from qce started at 2019-06-14 23:07:51 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: redis-operator-b7597fc6c-fhsq9 from qiniu-redis started at 2019-06-06 05:55:00 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container redis-operator ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: kibana-58f596b5d4-gprzs from kube-system started at 2019-06-09 10:42:30 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container kibana ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: csi-rbd-ceph-csi-rbd-provisioner-0 from default started at 2019-06-15 04:42:56 +0000 UTC (3 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container csi-provisioner ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: 	Container csi-snapshotter ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: calico-kube-controllers-5ffbcb76cf-km64s from kube-system started at 2019-06-06 06:34:55 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container calico-kube-controllers ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: prometheus-operator-prometheus-node-exporter-ctlvb from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container node-exporter ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: qce-clair-6f69f7554d-2hpxb from qce started at 2019-06-08 07:24:41 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container clair ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: csi-cephfs-ceph-csi-cephfs-attacher-0 from default started at 2019-05-14 08:47:42 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container csi-cephfsplugin-attacher ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: rabbitmq-operator-845b85b447-qx5nm from qiniu-rabbitmq started at 2019-06-15 05:32:51 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container rabbitmq-operator ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: calico-node-vfj4h from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container calico-node ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: 	Container install-cni ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-c2hjw from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: alert-apiserver-5f887ff458-dcdcn from kube-system started at 2019-06-13 06:50:57 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container alert-apiserver ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-mncbd from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.555: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 09:10:23.555: INFO: 
+Logging pods the kubelet thinks is on node node3 before test
+Jun 18 09:10:23.569: INFO: tiller-deploy-555696dfc8-gvznf from kube-system started at 2019-05-14 08:33:12 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container tiller ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-tnz48 from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: qce-postgres-stolon-sentinel-b6bcb4448-c4nmj from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: qce-postgres-stolon-proxy-78b9bc58d8-pg92h from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: prometheus-operator-prometheus-blackbox-exporter-5d4cbbf54vzmk6 from kube-system started at 2019-05-16 08:39:36 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container blackbox-exporter ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: 	Container configmap-reload ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: prometheus-operator-kube-state-metrics-969f69894-p5bbm from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container kube-state-metrics ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: prometheus-operator-grafana-86b99c77dd-cmbdv from kube-system started at 2019-05-16 08:39:36 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container grafana ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: 	Container grafana-sc-dashboard ready: true, restart count 39
+Jun 18 09:10:23.569: INFO: alert-apiserver-etcd-6d744f7648-llfwf from kube-system started at 2019-06-13 06:49:42 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container alert-apiserver-etcd ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: prometheus-operator-prometheus-node-exporter-84pmd from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container node-exporter ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: qce-jenkins-0 from qce started at 2019-06-16 18:40:16 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container qce-jenkins ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-gxvpm from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: logkit-poc-znzg2 from kube-system started at 2019-06-18 06:27:20 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container logkit-poc ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: kube-proxy-tc77p from kube-system started at 2019-05-14 05:38:50 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: calico-node-mzvzv from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container calico-node ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: 	Container install-cni ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: mongors-operator-65df599b-wjs4w from qiniu-mongors started at 2019-06-04 11:39:27 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container mongors-operator ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: qce-portal-deploy-6d799f79df-5lsgc from qce started at 2019-06-17 04:26:28 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.569: INFO: 	Container qce-portal ready: true, restart count 0
+Jun 18 09:10:23.569: INFO: 
+Logging pods the kubelet thinks is on node node4 before test
+Jun 18 09:10:23.579: INFO: kirk-apiserver-doc-6b5f8c7dd8-lm2pv from qce started at 2019-06-18 05:42:55 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.579: INFO: 	Container kirk-apiserver-doc ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: logkit-poc-7shgm from kube-system started at 2019-06-16 19:36:14 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.579: INFO: 	Container logkit-poc ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-7cg42 from default started at 2019-06-16 19:50:32 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.579: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: mysqldata-operator-6f447687b6-qdkt8 from qiniu-mysql started at 2019-06-18 03:17:07 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.579: INFO: 	Container mysql-operator ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: prometheus-operator-prometheus-node-exporter-f2zgm from kube-system started at 2019-06-16 19:39:12 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.579: INFO: 	Container node-exporter ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: kube-proxy-2vsgc from kube-system started at 2019-06-16 19:50:32 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.579: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: mysql-operator-v2-645fcc7f6c-l9dtm from qiniu-mysql started at 2019-06-18 03:19:36 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.579: INFO: 	Container mysql-operator ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: elasticsearch-c5cc84d5f-ctdmq from kube-system started at 2019-06-18 06:26:40 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.579: INFO: 	Container elasticsearch ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: 	Container es-rotate ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: calico-node-fhsvk from kube-system started at 2019-06-16 19:53:03 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.579: INFO: 	Container calico-node ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: 	Container install-cni ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-q2jtp from default started at 2019-06-16 19:51:06 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.579: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 09:10:23.579: INFO: 
+Logging pods the kubelet thinks is on node node5 before test
+Jun 18 09:10:23.592: INFO: calico-node-fmzrt from kube-system started at 2019-05-14 06:16:49 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container calico-node ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: 	Container install-cni ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: csi-cephfs-ceph-csi-cephfs-nodeplugin-jfmbb from default started at 2019-05-14 08:47:42 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container csi-cephfsplugin ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: qce-postgres-stolon-proxy-78b9bc58d8-8pp2x from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: onetimeurl-controller-745fc87d5d-g58jg from qce started at 2019-05-14 10:16:10 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container onetimeurl-controller ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: logkit-poc-5z5cm from kube-system started at 2019-05-17 03:17:51 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container logkit-poc ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: csi-rbd-ceph-csi-rbd-nodeplugin-42fl8 from default started at 2019-05-14 08:47:33 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container csi-rbdplugin ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: 	Container driver-registrar ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: csi-rbd-ceph-csi-rbd-attacher-0 from default started at 2019-05-14 08:47:33 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container csi-rbdplugin-attacher ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-18 07:13:06 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container kube-sonobuoy ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: alertmanager-prometheus-operator-alertmanager-0 from kube-system started at 2019-05-16 08:39:44 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container alertmanager ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: 	Container config-reloader ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: sonobuoy-e2e-job-2b96015867f64622 from heptio-sonobuoy started at 2019-06-18 07:13:12 +0000 UTC (2 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container e2e ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: qce-user-manual-deploy-867778f667-dcl87 from qce started at 2019-05-27 12:26:46 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container qce-user-manual ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: prometheus-operator-prometheus-node-exporter-9g6lb from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container node-exporter ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: prometheus-prometheus-operator-prometheus-1 from kube-system started at 2019-06-13 11:42:12 +0000 UTC (3 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container prometheus ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: 	Container prometheus-config-reloader ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: 	Container rules-configmap-reloader ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: alert-dispatcher-58d448f9c9-4mxgj from kube-system started at 2019-06-15 12:19:08 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container alert-dispatcher ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: kube-proxy-lqpj7 from kube-system started at 2019-05-14 05:38:48 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: qce-postgres-stolon-sentinel-b6bcb4448-jbrkl from qce started at 2019-05-14 09:40:16 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container stolon ready: true, restart count 0
+Jun 18 09:10:23.592: INFO: prometheus-operator-operator-654b9d4648-lflhd from kube-system started at 2019-05-16 08:39:36 +0000 UTC (1 container statuses recorded)
+Jun 18 09:10:23.592: INFO: 	Container prometheus-operator ready: true, restart count 0
+[It] validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Trying to schedule Pod with nonempty NodeSelector.
+STEP: Considering event: 
+Type = [Warning], Name = [restricted-pod.15a93fead1600c0f], Reason = [FailedScheduling], Message = [0/6 nodes are available: 6 node(s) didn't match node selector.]
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:10:24.632: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-sched-pred-t856c" for this suite.
+Jun 18 09:10:32.650: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:10:32.703: INFO: namespace: e2e-tests-sched-pred-t856c, resource: bindings, ignored listing per whitelist
+Jun 18 09:10:32.967: INFO: namespace e2e-tests-sched-pred-t856c deletion completed in 8.331590444s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70
+
+• [SLOW TEST:10.355 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22
+  validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+Jun 18 09:10:32.967: INFO: >>> kubeConfig: /tmp/kubeconfig-656024001
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-fznc9
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name projected-configmap-test-volume-map-ed83aa89-91a8-11e9-bbf5-0e74dabf3615
+STEP: Creating a pod to test consume configMaps
+Jun 18 09:10:33.698: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-ed847f37-91a8-11e9-bbf5-0e74dabf3615" in namespace "e2e-tests-projected-fznc9" to be "success or failure"
+Jun 18 09:10:33.700: INFO: Pod "pod-projected-configmaps-ed847f37-91a8-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 1.827726ms
+Jun 18 09:10:35.705: INFO: Pod "pod-projected-configmaps-ed847f37-91a8-11e9-bbf5-0e74dabf3615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006657169s
+Jun 18 09:10:37.707: INFO: Pod "pod-projected-configmaps-ed847f37-91a8-11e9-bbf5-0e74dabf3615": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009164581s
+STEP: Saw pod success
+Jun 18 09:10:37.707: INFO: Pod "pod-projected-configmaps-ed847f37-91a8-11e9-bbf5-0e74dabf3615" satisfied condition "success or failure"
+Jun 18 09:10:37.711: INFO: Trying to get logs from node node5 pod pod-projected-configmaps-ed847f37-91a8-11e9-bbf5-0e74dabf3615 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 18 09:10:37.729: INFO: Waiting for pod pod-projected-configmaps-ed847f37-91a8-11e9-bbf5-0e74dabf3615 to disappear
+Jun 18 09:10:37.730: INFO: Pod pod-projected-configmaps-ed847f37-91a8-11e9-bbf5-0e74dabf3615 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+Jun 18 09:10:37.730: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-fznc9" for this suite.
+Jun 18 09:10:48.544: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 18 09:10:48.681: INFO: namespace: e2e-tests-projected-fznc9, resource: bindings, ignored listing per whitelist
+Jun 18 09:10:50.567: INFO: namespace e2e-tests-projected-fznc9 deletion completed in 12.833510673s
+
+• [SLOW TEST:17.600 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.5-beta.0.54+2166946f41b36d/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSJun 18 09:10:50.567: INFO: Running AfterSuite actions on all nodes
+Jun 18 09:10:50.567: INFO: Running AfterSuite actions on node 1
+Jun 18 09:10:50.567: INFO: Skipping dumping logs from cluster
+
+Ran 200 of 2161 Specs in 7052.985 seconds
+SUCCESS! -- 200 Passed | 0 Failed | 0 Pending | 1961 Skipped PASS
+
+Ginkgo ran 1 suite in 1h57m33.82172656s
+Test Suite Passed
diff --git a/v1.13/qiniucloud/junit_01.xml b/v1.13/qiniucloud/junit_01.xml
new file mode 100644
index 0000000000..cdba9e8435
--- /dev/null
+++ b/v1.13/qiniucloud/junit_01.xml
@@ -0,0 +1,6086 @@
+
+  
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+  
\ No newline at end of file