From f9b4fd215be37827f554374464c54e27efeaac50 Mon Sep 17 00:00:00 2001
From: Jaga Santagostino
Date: Fri, 21 Jun 2019 22:46:06 +0200
Subject: [PATCH] add kubernetes-fury-aws conformance tests for k8s v1.15
 (#631)

---
 v1.15/kubernetes-fury-aws/PRODUCT.yaml |     9 +
 v1.15/kubernetes-fury-aws/README.md    |     9 +
 v1.15/kubernetes-fury-aws/e2e.log      | 10588 +++++++++++++++++++
 v1.15/kubernetes-fury-aws/junit_01.xml | 12806 +++++++++++++++++++++++
 4 files changed, 23412 insertions(+)
 create mode 100644 v1.15/kubernetes-fury-aws/PRODUCT.yaml
 create mode 100644 v1.15/kubernetes-fury-aws/README.md
 create mode 100644 v1.15/kubernetes-fury-aws/e2e.log
 create mode 100644 v1.15/kubernetes-fury-aws/junit_01.xml

diff --git a/v1.15/kubernetes-fury-aws/PRODUCT.yaml b/v1.15/kubernetes-fury-aws/PRODUCT.yaml
new file mode 100644
index 0000000000..5cd999dca4
--- /dev/null
+++ b/v1.15/kubernetes-fury-aws/PRODUCT.yaml
@@ -0,0 +1,9 @@
+vendor: SIGHUP
+name: kubernetes-fury-aws
+version: k8s-1.15.0-fury-1.0.0
+website_url: https://github.com/sighupio/fury-kubernetes-aws
+repo_url: https://github.com/sighupio/fury-kubernetes-aws
+documentation_url: https://github.com/sighupio/fury-kubernetes-aws/blob/master/README.md
+product_logo_url: https://s3.us-west-1.wasabisys.com/sighup-logos/fury-logo.svg
+type: installer
+description: Kubernetes Fury for AWS is a battle-tested distribution, purely based on upstream Kubernetes, that deploys a production-grade Kubernetes cluster at scale with a comprehensive Cloud Native stack implemented with top-notch CNCF components.
diff --git a/v1.15/kubernetes-fury-aws/README.md b/v1.15/kubernetes-fury-aws/README.md
new file mode 100644
index 0000000000..0727692b0e
--- /dev/null
+++ b/v1.15/kubernetes-fury-aws/README.md
@@ -0,0 +1,9 @@
+# Conformance testing kubernetes-fury-aws
+
+## Steps to reproduce
+
+- Install kubernetes-fury-aws following the [documentation](https://github.com/sighupio/fury-kubernetes-aws/blob/master/README.md)
+- `go get -u -v github.com/heptio/sonobuoy`
+- `sonobuoy run --kubeconfig [path to kubeconfig]`
+- `sonobuoy retrieve . --kubeconfig [path to kubeconfig]`
+- `mkdir ./results; tar xzf *.tar.gz -C ./results` (the steps above are collected into a single script below)
diff --git a/v1.15/kubernetes-fury-aws/e2e.log b/v1.15/kubernetes-fury-aws/e2e.log
new file mode 100644
index 0000000000..a10cc91357
--- /dev/null
+++ b/v1.15/kubernetes-fury-aws/e2e.log
@@ -0,0 +1,10588 @@
+I0620 09:59:43.198185 15 test_context.go:406] Using a temporary kubeconfig file from in-cluster config : /tmp/kubeconfig-878248618
+I0620 09:59:43.200550 15 e2e.go:241] Starting e2e run "731aade8-62cc-405e-9385-a3927c938cdd" on Ginkgo node 1
+Running Suite: Kubernetes e2e suite
+===================================
+Random Seed: 1561024781 - Will randomize all specs
+Will run 215 of 4411 specs
+
+Jun 20 09:59:43.510: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 09:59:43.513: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable
+Jun 20 09:59:43.527: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
+Jun 20 09:59:43.556: INFO: 21 / 21 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
+Jun 20 09:59:43.556: INFO: expected 2 pod replicas in namespace 'kube-system', 2 are Running and Ready.
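The README's reproduction steps above, collected into one script. This is a sketch, not part of the submission: `sonobuoy status` is an addition for checking completion, and the `KUBECONFIG` handling is an assumption; everything else mirrors the README.

```sh
#!/bin/sh
# Reproduction sketch for the steps in README.md above.
# Assumes Go, kubectl, and a kubeconfig for the Fury cluster.
export KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config}

go get -u -v github.com/heptio/sonobuoy        # installs the sonobuoy CLI

sonobuoy run --kubeconfig "$KUBECONFIG"        # kicks off the conformance suite
sonobuoy status --kubeconfig "$KUBECONFIG"     # re-run until it reports complete

sonobuoy retrieve . --kubeconfig "$KUBECONFIG" # downloads the result tarball
mkdir ./results && tar xzf ./*.tar.gz -C ./results
```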
+Jun 20 09:59:43.556: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start +Jun 20 09:59:43.564: INFO: 5 / 5 pods ready in namespace 'kube-system' in daemonset 'kube-proxy' (0 seconds elapsed) +Jun 20 09:59:43.564: INFO: 5 / 5 pods ready in namespace 'kube-system' in daemonset 'weave-net' (0 seconds elapsed) +Jun 20 09:59:43.564: INFO: e2e test version: v1.15.0 +Jun 20 09:59:43.566: INFO: kube-apiserver version: v1.15.0 +SSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 09:59:43.566: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename emptydir +Jun 20 09:59:43.612: INFO: No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled. +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating a pod to test emptydir 0644 on node default medium +Jun 20 09:59:43.626: INFO: Waiting up to 5m0s for pod "pod-90d7c8d5-790a-491b-a0f0-e60adc33116d" in namespace "emptydir-792" to be "success or failure" +Jun 20 09:59:43.629: INFO: Pod "pod-90d7c8d5-790a-491b-a0f0-e60adc33116d": Phase="Pending", Reason="", readiness=false. Elapsed: 3.103577ms +Jun 20 09:59:45.633: INFO: Pod "pod-90d7c8d5-790a-491b-a0f0-e60adc33116d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.00672808s +Jun 20 09:59:47.637: INFO: Pod "pod-90d7c8d5-790a-491b-a0f0-e60adc33116d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.010887458s +STEP: Saw pod success +Jun 20 09:59:47.637: INFO: Pod "pod-90d7c8d5-790a-491b-a0f0-e60adc33116d" satisfied condition "success or failure" +Jun 20 09:59:47.641: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-90d7c8d5-790a-491b-a0f0-e60adc33116d container test-container: +STEP: delete the pod +Jun 20 09:59:47.669: INFO: Waiting for pod pod-90d7c8d5-790a-491b-a0f0-e60adc33116d to disappear +Jun 20 09:59:47.672: INFO: Pod pod-90d7c8d5-790a-491b-a0f0-e60adc33116d no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 09:59:47.672: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-792" for this suite. 
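A minimal sketch of the kind of pod this spec creates: a non-root user writing into an emptyDir on the default medium. The real test uses the e2e mounttest image; busybox, the user ID, and the commands here are stand-ins.

```sh
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: emptydir-0644-demo
spec:
  restartPolicy: Never
  securityContext:
    runAsUser: 1001                # "non-root" in the spec name
  containers:
  - name: test-container
    image: busybox:1.29
    # with the default umask 022 the file lands as 0644, the mode in the spec name
    command: ["sh", "-c", "echo hi > /test-volume/f && stat -c '%a' /test-volume/f"]
    volumeMounts:
    - name: test-volume
      mountPath: /test-volume
  volumes:
  - name: test-volume
    emptyDir: {}                   # "default" medium (node disk, not tmpfs)
EOF
```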
+Jun 20 09:59:53.685: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 09:59:53.755: INFO: namespace emptydir-792 deletion completed in 6.08084354s + +• [SLOW TEST:10.189 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41 + should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Variable Expansion + should allow composing env vars into new env vars [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [k8s.io] Variable Expansion + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 09:59:53.755: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow composing env vars into new env vars [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating a pod to test env composition +Jun 20 09:59:53.796: INFO: Waiting up to 5m0s for pod "var-expansion-6849ed17-e6bf-4081-ab6d-aa4bb403f4c8" in namespace "var-expansion-2150" to be "success or failure" +Jun 20 09:59:53.799: INFO: Pod "var-expansion-6849ed17-e6bf-4081-ab6d-aa4bb403f4c8": Phase="Pending", Reason="", readiness=false. Elapsed: 3.419587ms +Jun 20 09:59:56.037: INFO: Pod "var-expansion-6849ed17-e6bf-4081-ab6d-aa4bb403f4c8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.241464716s +Jun 20 09:59:58.259: INFO: Pod "var-expansion-6849ed17-e6bf-4081-ab6d-aa4bb403f4c8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.463138626s +STEP: Saw pod success +Jun 20 09:59:58.259: INFO: Pod "var-expansion-6849ed17-e6bf-4081-ab6d-aa4bb403f4c8" satisfied condition "success or failure" +Jun 20 09:59:58.270: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod var-expansion-6849ed17-e6bf-4081-ab6d-aa4bb403f4c8 container dapi-container: +STEP: delete the pod +Jun 20 09:59:58.293: INFO: Waiting for pod var-expansion-6849ed17-e6bf-4081-ab6d-aa4bb403f4c8 to disappear +Jun 20 09:59:58.296: INFO: Pod var-expansion-6849ed17-e6bf-4081-ab6d-aa4bb403f4c8 no longer exists +[AfterEach] [k8s.io] Variable Expansion + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 09:59:58.296: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-2150" for this suite. 
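What "env composition" refers to: an env var whose value references previously declared vars with `$(VAR)` syntax. A sketch with assumed names and values:

```sh
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: var-expansion-demo
spec:
  restartPolicy: Never
  containers:
  - name: dapi-container
    image: busybox:1.29
    command: ["sh", "-c", "echo FOOBAR=$FOOBAR"]
    env:
    - name: FOO
      value: foo-value
    - name: BAR
      value: bar-value
    - name: FOOBAR                 # composed from the two vars above
      value: "$(FOO);;$(BAR)"      # the pod logs FOOBAR=foo-value;;bar-value
EOF
```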
+Jun 20 10:00:04.319: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:00:04.393: INFO: namespace var-expansion-2150 deletion completed in 6.09377227s + +• [SLOW TEST:10.638 seconds] +[k8s.io] Variable Expansion +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 + should allow composing env vars into new env vars [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:00:04.396: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating projection with secret that has name projected-secret-test-map-0445be25-830c-4d78-b137-2312ba6355fb +STEP: Creating a pod to test consume secrets +Jun 20 10:00:04.443: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-9493bfc1-8c87-48da-91c0-b156d3b84101" in namespace "projected-404" to be "success or failure" +Jun 20 10:00:04.449: INFO: Pod "pod-projected-secrets-9493bfc1-8c87-48da-91c0-b156d3b84101": Phase="Pending", Reason="", readiness=false. Elapsed: 5.720893ms +Jun 20 10:00:06.453: INFO: Pod "pod-projected-secrets-9493bfc1-8c87-48da-91c0-b156d3b84101": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009070742s +Jun 20 10:00:08.457: INFO: Pod "pod-projected-secrets-9493bfc1-8c87-48da-91c0-b156d3b84101": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012913425s +STEP: Saw pod success +Jun 20 10:00:08.457: INFO: Pod "pod-projected-secrets-9493bfc1-8c87-48da-91c0-b156d3b84101" satisfied condition "success or failure" +Jun 20 10:00:08.459: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-secrets-9493bfc1-8c87-48da-91c0-b156d3b84101 container projected-secret-volume-test: +STEP: delete the pod +Jun 20 10:00:08.479: INFO: Waiting for pod pod-projected-secrets-9493bfc1-8c87-48da-91c0-b156d3b84101 to disappear +Jun 20 10:00:08.483: INFO: Pod pod-projected-secrets-9493bfc1-8c87-48da-91c0-b156d3b84101 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:00:08.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-404" for this suite. 
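The projected-volume shape this spec exercises: a secret source whose item is remapped to a new `path` with a per-item `mode`. Names and values below are illustrative, not the test's fixtures.

```sh
kubectl create secret generic projected-secret-demo --from-literal=data-1=value-1
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: projected-secret-demo-pod
spec:
  restartPolicy: Never
  containers:
  - name: projected-secret-volume-test
    image: busybox:1.29
    command: ["sh", "-c", "ls -l /etc/projected && cat /etc/projected/new-path-data-1"]
    volumeMounts:
    - name: projected-secret-volume
      mountPath: /etc/projected
      readOnly: true
  volumes:
  - name: projected-secret-volume
    projected:
      sources:
      - secret:
          name: projected-secret-demo
          items:
          - key: data-1
            path: new-path-data-1  # the "mapping"
            mode: 0400             # the per-item "Item Mode"
EOF
```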
+Jun 20 10:00:14.498: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:00:14.570: INFO: namespace projected-404 deletion completed in 6.082497697s + +• [SLOW TEST:10.174 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33 + should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +[sig-node] Downward API + should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-node] Downward API + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:00:14.570: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating a pod to test downward api env vars +Jun 20 10:00:14.614: INFO: Waiting up to 5m0s for pod "downward-api-d6eaa118-08fe-4e81-98cd-fccb0a8649e6" in namespace "downward-api-1800" to be "success or failure" +Jun 20 10:00:14.619: INFO: Pod "downward-api-d6eaa118-08fe-4e81-98cd-fccb0a8649e6": Phase="Pending", Reason="", readiness=false. Elapsed: 4.517671ms +Jun 20 10:00:16.622: INFO: Pod "downward-api-d6eaa118-08fe-4e81-98cd-fccb0a8649e6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007672205s +Jun 20 10:00:18.630: INFO: Pod "downward-api-d6eaa118-08fe-4e81-98cd-fccb0a8649e6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016312332s +STEP: Saw pod success +Jun 20 10:00:18.630: INFO: Pod "downward-api-d6eaa118-08fe-4e81-98cd-fccb0a8649e6" satisfied condition "success or failure" +Jun 20 10:00:18.633: INFO: Trying to get logs from node ip-10-100-12-226.eu-west-1.compute.internal pod downward-api-d6eaa118-08fe-4e81-98cd-fccb0a8649e6 container dapi-container: +STEP: delete the pod +Jun 20 10:00:18.659: INFO: Waiting for pod downward-api-d6eaa118-08fe-4e81-98cd-fccb0a8649e6 to disappear +Jun 20 10:00:18.661: INFO: Pod downward-api-d6eaa118-08fe-4e81-98cd-fccb0a8649e6 no longer exists +[AfterEach] [sig-node] Downward API + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:00:18.661: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-1800" for this suite. 
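The API surface under test here: `resourceFieldRef` env vars that expose a container's own requests and limits. A sketch with assumed resource values:

```sh
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: downward-api-resources-demo
spec:
  restartPolicy: Never
  containers:
  - name: dapi-container
    image: busybox:1.29
    command: ["sh", "-c", "env | grep -e CPU -e MEMORY"]
    resources:
      requests:
        cpu: 250m
        memory: 32Mi
      limits:
        cpu: 500m
        memory: 64Mi
    env:
    - name: CPU_LIMIT
      valueFrom:
        resourceFieldRef:          # containerName defaults to this container
          resource: limits.cpu
    - name: MEMORY_REQUEST
      valueFrom:
        resourceFieldRef:
          resource: requests.memory
EOF
```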
+Jun 20 10:00:24.674: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:00:24.750: INFO: namespace downward-api-1800 deletion completed in 6.085555134s + +• [SLOW TEST:10.180 seconds] +[sig-node] Downward API +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:32 + should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + binary data should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:00:24.751: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] binary data should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating configMap with name configmap-test-upd-0e4d134f-7a9d-4c5c-8fff-1fc771839b3e +STEP: Creating the pod +STEP: Waiting for pod with text data +STEP: Waiting for pod with binary data +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:00:26.819: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-1485" for this suite. 
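The distinction this spec checks: ConfigMap `data` carries UTF-8 text while `binaryData` carries base64-encoded bytes, and both surface as files when mounted. An illustrative object (keys and bytes assumed):

```sh
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: configmap-binary-demo
data:
  text-data: "some text"
binaryData:
  binary-data: 3q2+7w==            # the bytes 0xDE 0xAD 0xBE 0xEF, base64-encoded
EOF
# Mounted as a configMap volume, this yields sibling files
# "text-data" and "binary-data".
```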
+Jun 20 10:00:48.834: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:00:48.942: INFO: namespace configmap-1485 deletion completed in 22.120028699s + +• [SLOW TEST:24.191 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + binary data should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:00:48.942: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating secret with name secret-test-map-acc605af-8f66-4b2a-9fd8-be6fd24f116a +STEP: Creating a pod to test consume secrets +Jun 20 10:00:48.994: INFO: Waiting up to 5m0s for pod "pod-secrets-4b497651-2d4b-4ead-b423-8b52cfc7a4b9" in namespace "secrets-33" to be "success or failure" +Jun 20 10:00:49.003: INFO: Pod "pod-secrets-4b497651-2d4b-4ead-b423-8b52cfc7a4b9": Phase="Pending", Reason="", readiness=false. Elapsed: 9.133366ms +Jun 20 10:00:51.006: INFO: Pod "pod-secrets-4b497651-2d4b-4ead-b423-8b52cfc7a4b9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012179608s +Jun 20 10:00:53.009: INFO: Pod "pod-secrets-4b497651-2d4b-4ead-b423-8b52cfc7a4b9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015344145s +STEP: Saw pod success +Jun 20 10:00:53.009: INFO: Pod "pod-secrets-4b497651-2d4b-4ead-b423-8b52cfc7a4b9" satisfied condition "success or failure" +Jun 20 10:00:53.012: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-secrets-4b497651-2d4b-4ead-b423-8b52cfc7a4b9 container secret-volume-test: +STEP: delete the pod +Jun 20 10:00:53.031: INFO: Waiting for pod pod-secrets-4b497651-2d4b-4ead-b423-8b52cfc7a4b9 to disappear +Jun 20 10:00:53.033: INFO: Pod pod-secrets-4b497651-2d4b-4ead-b423-8b52cfc7a4b9 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:00:53.033: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-33" for this suite. 
+Jun 20 10:00:59.046: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:00:59.117: INFO: namespace secrets-33 deletion completed in 6.080906047s + +• [SLOW TEST:10.175 seconds] +[sig-storage] Secrets +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33 + should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:00:59.117: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating configMap with name configmap-test-volume-map-7a96b973-e267-482c-9f17-6e6d3e052085 +STEP: Creating a pod to test consume configMaps +Jun 20 10:00:59.161: INFO: Waiting up to 5m0s for pod "pod-configmaps-a9ee7560-222e-4cf3-be89-a372d2f68a58" in namespace "configmap-110" to be "success or failure" +Jun 20 10:00:59.167: INFO: Pod "pod-configmaps-a9ee7560-222e-4cf3-be89-a372d2f68a58": Phase="Pending", Reason="", readiness=false. Elapsed: 5.938034ms +Jun 20 10:01:01.171: INFO: Pod "pod-configmaps-a9ee7560-222e-4cf3-be89-a372d2f68a58": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009495394s +STEP: Saw pod success +Jun 20 10:01:01.171: INFO: Pod "pod-configmaps-a9ee7560-222e-4cf3-be89-a372d2f68a58" satisfied condition "success or failure" +Jun 20 10:01:01.174: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-configmaps-a9ee7560-222e-4cf3-be89-a372d2f68a58 container configmap-volume-test: +STEP: delete the pod +Jun 20 10:01:01.210: INFO: Waiting for pod pod-configmaps-a9ee7560-222e-4cf3-be89-a372d2f68a58 to disappear +Jun 20 10:01:01.212: INFO: Pod pod-configmaps-a9ee7560-222e-4cf3-be89-a372d2f68a58 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:01:01.212: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-110" for this suite. 
+Jun 20 10:01:07.225: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:01:07.298: INFO: namespace configmap-110 deletion completed in 6.083294366s + +• [SLOW TEST:8.181 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourceDefinition resources Simple CustomResourceDefinition + creating/deleting custom resource definition objects works [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:01:07.300: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename custom-resource-definition +STEP: Waiting for a default service account to be provisioned in namespace +[It] creating/deleting custom resource definition objects works [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +Jun 20 10:01:07.336: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:01:08.459: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "custom-resource-definition-1705" for this suite. 
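The spec registers a throwaway CRD and deletes it again. On 1.15 that is the `apiextensions.k8s.io/v1beta1` API (CRDs went GA as `v1` only in 1.16); the group and names below are placeholders, not the test's generated fixture.

```sh
kubectl apply -f - <<'EOF'
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: noxus.mygroup.example.com          # must be <plural>.<group>
spec:
  group: mygroup.example.com
  version: v1beta1
  scope: Namespaced
  names:
    plural: noxus
    singular: noxu
    kind: Noxu
EOF
kubectl delete crd noxus.mygroup.example.com
```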
+Jun 20 10:01:14.472: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:01:14.544: INFO: namespace custom-resource-definition-1705 deletion completed in 6.082403621s + +• [SLOW TEST:7.245 seconds] +[sig-api-machinery] CustomResourceDefinition resources +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + Simple CustomResourceDefinition + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go:35 + creating/deleting custom resource definition objects works [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Proxy server + should support --unix-socket=/path [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:01:14.545: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221 +[It] should support --unix-socket=/path [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Starting the proxy +Jun 20 10:01:14.577: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-878248618 proxy --unix-socket=/tmp/kubectl-proxy-unix661143041/test' +STEP: retrieving proxy /api/ output +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:01:14.629: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-8162" for this suite. 
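What the flag does: instead of binding a TCP port, the proxy listens on a Unix socket, which curl can talk to directly. A sketch (socket path assumed):

```sh
kubectl proxy --unix-socket=/tmp/kubectl-proxy.sock &
sleep 1                                        # give the proxy a moment to bind
curl --unix-socket /tmp/kubectl-proxy.sock http://localhost/api/
kill %1                                        # stop the background proxy
```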
+Jun 20 10:01:20.644: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:01:20.726: INFO: namespace kubectl-8162 deletion completed in 6.093352576s + +• [SLOW TEST:6.181 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Proxy server + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 + should support --unix-socket=/path [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSS +------------------------------ +[sig-storage] Downward API volume + should update labels on modification [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:01:20.726: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should update labels on modification [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating the pod +Jun 20 10:01:23.326: INFO: Successfully updated pod "labelsupdate9fc2edfd-5d73-48bb-b46a-59acc3b50d39" +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:01:25.345: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-9307" for this suite. 
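The mechanism being verified: a downwardAPI volume file that the kubelet rewrites in place when the pod's labels change. A sketch with assumed names:

```sh
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: labelsupdate-demo
  labels:
    key1: value1
spec:
  containers:
  - name: client-container
    image: busybox:1.29
    command: ["sh", "-c", "while true; do cat /etc/podinfo/labels; sleep 5; done"]
    volumeMounts:
    - name: podinfo
      mountPath: /etc/podinfo
  volumes:
  - name: podinfo
    downwardAPI:
      items:
      - path: labels
        fieldRef:
          fieldPath: metadata.labels
EOF
# Relabel the running pod; the mounted "labels" file is refreshed in place.
kubectl label pod labelsupdate-demo key2=value2
```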
+Jun 20 10:01:47.358: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:01:47.430: INFO: namespace downward-api-9307 deletion completed in 22.081796757s + +• [SLOW TEST:26.704 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should update labels on modification [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:01:47.430: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:60 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:75 +STEP: Creating service test in namespace statefulset-7961 +[It] should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating a new StatefulSet +Jun 20 10:01:47.477: INFO: Found 0 stateful pods, waiting for 3 +Jun 20 10:01:57.481: INFO: Found 2 stateful pods, waiting for 3 +Jun 20 10:02:07.481: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Jun 20 10:02:07.481: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +Jun 20 10:02:07.481: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Updating stateful set template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine +Jun 20 10:02:07.512: INFO: Updating stateful set ss2 +STEP: Creating a new revision +STEP: Not applying an update when the partition is greater than the number of replicas +STEP: Performing a canary update +Jun 20 10:02:17.543: INFO: Updating stateful set ss2 +Jun 20 10:02:17.549: INFO: Waiting for Pod statefulset-7961/ss2-2 to have revision ss2-6c5cd755cd update revision ss2-7c9b54fd4c +STEP: Restoring Pods to the correct revision when they are deleted +Jun 20 10:02:27.590: INFO: Found 2 stateful pods, waiting for 3 +Jun 20 10:02:37.593: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Jun 20 10:02:37.593: INFO: Waiting for pod ss2-1 to enter 
Running - Ready=true, currently Running - Ready=true +Jun 20 10:02:37.593: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Performing a phased rolling update +Jun 20 10:02:37.615: INFO: Updating stateful set ss2 +Jun 20 10:02:37.621: INFO: Waiting for Pod statefulset-7961/ss2-1 to have revision ss2-6c5cd755cd update revision ss2-7c9b54fd4c +Jun 20 10:02:47.647: INFO: Updating stateful set ss2 +Jun 20 10:02:47.654: INFO: Waiting for StatefulSet statefulset-7961/ss2 to complete update +Jun 20 10:02:47.654: INFO: Waiting for Pod statefulset-7961/ss2-0 to have revision ss2-6c5cd755cd update revision ss2-7c9b54fd4c +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:86 +Jun 20 10:02:57.660: INFO: Deleting all statefulset in ns statefulset-7961 +Jun 20 10:02:57.671: INFO: Scaling statefulset ss2 to 0 +Jun 20 10:03:17.687: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 20 10:03:17.690: INFO: Deleting statefulset ss2 +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:03:17.700: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-7961" for this suite. +Jun 20 10:03:23.713: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:03:23.779: INFO: namespace statefulset-7961 deletion completed in 6.076045738s + +• [SLOW TEST:96.349 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:03:23.779: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating secret with name secret-test-7281ee07-3927-422f-9b49-743f6ae325c7 +STEP: Creating a pod to test consume secrets +Jun 20 10:03:23.825: INFO: Waiting up to 5m0s for pod 
"pod-secrets-816c6722-9572-4cb9-91c3-cbc10062653d" in namespace "secrets-212" to be "success or failure" +Jun 20 10:03:23.830: INFO: Pod "pod-secrets-816c6722-9572-4cb9-91c3-cbc10062653d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.187222ms +Jun 20 10:03:25.834: INFO: Pod "pod-secrets-816c6722-9572-4cb9-91c3-cbc10062653d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008772089s +STEP: Saw pod success +Jun 20 10:03:25.834: INFO: Pod "pod-secrets-816c6722-9572-4cb9-91c3-cbc10062653d" satisfied condition "success or failure" +Jun 20 10:03:25.837: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-secrets-816c6722-9572-4cb9-91c3-cbc10062653d container secret-volume-test: +STEP: delete the pod +Jun 20 10:03:25.854: INFO: Waiting for pod pod-secrets-816c6722-9572-4cb9-91c3-cbc10062653d to disappear +Jun 20 10:03:25.856: INFO: Pod pod-secrets-816c6722-9572-4cb9-91c3-cbc10062653d no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:03:25.856: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-212" for this suite. +Jun 20 10:03:31.869: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:03:31.944: INFO: namespace secrets-212 deletion completed in 6.084456445s + +• [SLOW TEST:8.165 seconds] +[sig-storage] Secrets +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33 + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:03:31.944: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating configMap with name configmap-test-volume-b3502cf3-f42d-4efa-b53c-80e630ad8cfc +STEP: Creating a pod to test consume configMaps +Jun 20 10:03:31.987: INFO: Waiting up to 5m0s for pod "pod-configmaps-c34a1556-5fb2-41a9-a841-e190bf473e77" in namespace "configmap-1681" to be "success or failure" +Jun 20 10:03:31.989: INFO: Pod "pod-configmaps-c34a1556-5fb2-41a9-a841-e190bf473e77": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.371092ms +Jun 20 10:03:33.993: INFO: Pod "pod-configmaps-c34a1556-5fb2-41a9-a841-e190bf473e77": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006571197s +STEP: Saw pod success +Jun 20 10:03:33.993: INFO: Pod "pod-configmaps-c34a1556-5fb2-41a9-a841-e190bf473e77" satisfied condition "success or failure" +Jun 20 10:03:33.996: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-configmaps-c34a1556-5fb2-41a9-a841-e190bf473e77 container configmap-volume-test: +STEP: delete the pod +Jun 20 10:03:34.014: INFO: Waiting for pod pod-configmaps-c34a1556-5fb2-41a9-a841-e190bf473e77 to disappear +Jun 20 10:03:34.017: INFO: Pod pod-configmaps-c34a1556-5fb2-41a9-a841-e190bf473e77 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:03:34.017: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-1681" for this suite. +Jun 20 10:03:40.029: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:03:40.129: INFO: namespace configmap-1681 deletion completed in 6.108675459s + +• [SLOW TEST:8.185 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SS +------------------------------ +[k8s.io] InitContainer [NodeConformance] + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:03:40.129: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename init-container +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:44 +[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: creating the pod +Jun 20 10:03:40.161: INFO: PodSpec: initContainers in spec.initContainers +Jun 20 10:04:26.545: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-647f2e39-e714-42dc-9b34-145f304acfeb", GenerateName:"", Namespace:"init-container-4908", SelfLink:"/api/v1/namespaces/init-container-4908/pods/pod-init-647f2e39-e714-42dc-9b34-145f304acfeb", UID:"5109725c-ba8f-435f-8965-07c76089a21a", ResourceVersion:"7796", Generation:0, 
CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63696621820, loc:(*time.Location)(0x80bb5c0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"161648208"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-bg2m7", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc001778180), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-bg2m7", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-bg2m7", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, 
StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"k8s.gcr.io/pause:3.1", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-bg2m7", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc001920278), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"ip-10-100-10-111.eu-west-1.compute.internal", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc001906180), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc0019202f0)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc001920310)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc001920318), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc00192031c), PreemptionPolicy:(*v1.PreemptionPolicy)(nil)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696621820, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696621820, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, 
ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696621820, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696621820, loc:(*time.Location)(0x80bb5c0)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"10.100.10.111", PodIP:"10.38.0.2", StartTime:(*v1.Time)(0xc0011445c0), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc001d561c0)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc001d56310)}, Ready:false, RestartCount:3, Image:"busybox:1.29", ImageID:"docker-pullable://busybox@sha256:8ccbac733d19c0dd4d70b4f0c1e12245b5fa3ad24758a11035ee505c629c0796", ContainerID:"docker://1161e4765d8bc5a844d38e067eef06f37f138ea78721c17e9f17be3b5cfba88e"}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc001144700), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"docker.io/library/busybox:1.29", ImageID:"", ContainerID:""}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc001144660), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"k8s.gcr.io/pause:3.1", ImageID:"", ContainerID:""}}, QOSClass:"Guaranteed"}} +[AfterEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:04:26.545: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-4908" for this suite. 
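The pod dumped above, reduced to a manifest (images and commands are taken from the log; everything else is trimmed): init1 always fails, so init2 and the app container run1 never start, and the pod loops through Init:Error / Init:CrashLoopBackOff under RestartPolicy Always.

```sh
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: pod-init-demo
spec:
  restartPolicy: Always            # the restart policy under test
  initContainers:
  - name: init1
    image: docker.io/library/busybox:1.29
    command: ["/bin/false"]        # fails every time
  - name: init2
    image: docker.io/library/busybox:1.29
    command: ["/bin/true"]         # never reached
  containers:
  - name: run1
    image: k8s.gcr.io/pause:3.1    # never started
EOF
kubectl get pod pod-init-demo      # watch RESTARTS climb on init1
```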
+Jun 20 10:04:48.560: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:04:48.634: INFO: namespace init-container-4908 deletion completed in 22.084597547s + +• [SLOW TEST:68.505 seconds] +[k8s.io] InitContainer [NodeConformance] +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run job + should create a job from an image when restart is OnFailure [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:04:48.634: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221 +[BeforeEach] [k8s.io] Kubectl run job + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1613 +[It] should create a job from an image when restart is OnFailure [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 20 10:04:48.668: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 run e2e-test-nginx-job --restart=OnFailure --generator=job/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=kubectl-9769' +Jun 20 10:04:48.929: INFO: stderr: "kubectl run --generator=job/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +Jun 20 10:04:48.929: INFO: stdout: "job.batch/e2e-test-nginx-job created\n" +STEP: verifying the job e2e-test-nginx-job was created +[AfterEach] [k8s.io] Kubectl run job + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1618 +Jun 20 10:04:48.935: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete jobs e2e-test-nginx-job --namespace=kubectl-9769' +Jun 20 10:04:49.010: INFO: stderr: "" +Jun 20 10:04:49.010: INFO: stdout: "job.batch \"e2e-test-nginx-job\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:04:49.010: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-9769" for this suite. 
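As the stderr above notes, `kubectl run --generator=job/v1` was already deprecated in 1.15. The equivalent with the non-deprecated porcelain, for reference:

```sh
kubectl create job e2e-test-nginx-job --image=docker.io/library/nginx:1.14-alpine
kubectl get jobs e2e-test-nginx-job            # verify it was created
kubectl delete jobs e2e-test-nginx-job
```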
+Jun 20 10:04:55.024: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:04:55.089: INFO: namespace kubectl-9769 deletion completed in 6.075584095s + +• [SLOW TEST:6.455 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl run job + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 + should create a job from an image when restart is OnFailure [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Pods + should support remote command execution over websockets [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:04:55.089: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:164 +[It] should support remote command execution over websockets [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +Jun 20 10:04:55.122: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: creating the pod +STEP: submitting the pod to kubernetes +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:04:57.267: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-564" for this suite. 
+Jun 20 10:05:39.304: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:05:39.414: INFO: namespace pods-564 deletion completed in 42.135314753s + +• [SLOW TEST:44.324 seconds] +[k8s.io] Pods +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 + should support remote command execution over websockets [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] HostPath + should give a volume the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] HostPath + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:05:39.414: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename hostpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] HostPath + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:37 +[It] should give a volume the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating a pod to test hostPath mode +Jun 20 10:05:39.454: INFO: Waiting up to 5m0s for pod "pod-host-path-test" in namespace "hostpath-9740" to be "success or failure" +Jun 20 10:05:39.457: INFO: Pod "pod-host-path-test": Phase="Pending", Reason="", readiness=false. Elapsed: 2.821484ms +Jun 20 10:05:41.460: INFO: Pod "pod-host-path-test": Phase="Pending", Reason="", readiness=false. Elapsed: 2.005869755s +Jun 20 10:05:43.463: INFO: Pod "pod-host-path-test": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009197086s +STEP: Saw pod success +Jun 20 10:05:43.463: INFO: Pod "pod-host-path-test" satisfied condition "success or failure" +Jun 20 10:05:43.466: INFO: Trying to get logs from node ip-10-100-12-226.eu-west-1.compute.internal pod pod-host-path-test container test-container-1: +STEP: delete the pod +Jun 20 10:05:43.485: INFO: Waiting for pod pod-host-path-test to disappear +Jun 20 10:05:43.489: INFO: Pod pod-host-path-test no longer exists +[AfterEach] [sig-storage] HostPath + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:05:43.489: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "hostpath-9740" for this suite. 
+Jun 20 10:05:49.502: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:05:49.571: INFO: namespace hostpath-9740 deletion completed in 6.078904135s + +• [SLOW TEST:10.157 seconds] +[sig-storage] HostPath +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:34 + should give a volume the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:05:49.571: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating a pod to test downward API volume plugin +Jun 20 10:05:49.610: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c19dea6a-7498-4459-a8ac-b16c3586f36f" in namespace "downward-api-2111" to be "success or failure" +Jun 20 10:05:49.616: INFO: Pod "downwardapi-volume-c19dea6a-7498-4459-a8ac-b16c3586f36f": Phase="Pending", Reason="", readiness=false. Elapsed: 6.018168ms +Jun 20 10:05:51.619: INFO: Pod "downwardapi-volume-c19dea6a-7498-4459-a8ac-b16c3586f36f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009324524s +STEP: Saw pod success +Jun 20 10:05:51.619: INFO: Pod "downwardapi-volume-c19dea6a-7498-4459-a8ac-b16c3586f36f" satisfied condition "success or failure" +Jun 20 10:05:51.622: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-c19dea6a-7498-4459-a8ac-b16c3586f36f container client-container: +STEP: delete the pod +Jun 20 10:05:51.641: INFO: Waiting for pod downwardapi-volume-c19dea6a-7498-4459-a8ac-b16c3586f36f to disappear +Jun 20 10:05:51.643: INFO: Pod downwardapi-volume-c19dea6a-7498-4459-a8ac-b16c3586f36f no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:05:51.643: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-2111" for this suite. 
+Jun 20 10:05:57.655: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:05:57.723: INFO: namespace downward-api-2111 deletion completed in 6.077081847s + +• [SLOW TEST:8.152 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:05:57.723: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating a pod to test downward API volume plugin +Jun 20 10:05:57.769: INFO: Waiting up to 5m0s for pod "downwardapi-volume-677250e8-8324-4ac3-a82f-cd52090b8994" in namespace "downward-api-1161" to be "success or failure" +Jun 20 10:05:57.772: INFO: Pod "downwardapi-volume-677250e8-8324-4ac3-a82f-cd52090b8994": Phase="Pending", Reason="", readiness=false. Elapsed: 3.287821ms +Jun 20 10:05:59.775: INFO: Pod "downwardapi-volume-677250e8-8324-4ac3-a82f-cd52090b8994": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006630805s +STEP: Saw pod success +Jun 20 10:05:59.775: INFO: Pod "downwardapi-volume-677250e8-8324-4ac3-a82f-cd52090b8994" satisfied condition "success or failure" +Jun 20 10:05:59.778: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-677250e8-8324-4ac3-a82f-cd52090b8994 container client-container: +STEP: delete the pod +Jun 20 10:05:59.797: INFO: Waiting for pod downwardapi-volume-677250e8-8324-4ac3-a82f-cd52090b8994 to disappear +Jun 20 10:05:59.801: INFO: Pod downwardapi-volume-677250e8-8324-4ac3-a82f-cd52090b8994 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:05:59.801: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-1161" for this suite. 
+Jun 20 10:06:05.829: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:06:05.902: INFO: namespace downward-api-1161 deletion completed in 6.093004522s + +• [SLOW TEST:8.179 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:06:05.904: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating configMap with name configmap-test-volume-303c531c-9003-4e7c-a896-2340fa4a480f +STEP: Creating a pod to test consume configMaps +Jun 20 10:06:05.949: INFO: Waiting up to 5m0s for pod "pod-configmaps-9cc3eb18-d830-4331-a2b9-de0f6444bbf8" in namespace "configmap-3998" to be "success or failure" +Jun 20 10:06:05.957: INFO: Pod "pod-configmaps-9cc3eb18-d830-4331-a2b9-de0f6444bbf8": Phase="Pending", Reason="", readiness=false. Elapsed: 8.491541ms +Jun 20 10:06:07.961: INFO: Pod "pod-configmaps-9cc3eb18-d830-4331-a2b9-de0f6444bbf8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.011864962s +STEP: Saw pod success +Jun 20 10:06:07.961: INFO: Pod "pod-configmaps-9cc3eb18-d830-4331-a2b9-de0f6444bbf8" satisfied condition "success or failure" +Jun 20 10:06:07.963: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-configmaps-9cc3eb18-d830-4331-a2b9-de0f6444bbf8 container configmap-volume-test: +STEP: delete the pod +Jun 20 10:06:07.979: INFO: Waiting for pod pod-configmaps-9cc3eb18-d830-4331-a2b9-de0f6444bbf8 to disappear +Jun 20 10:06:07.982: INFO: Pod pod-configmaps-9cc3eb18-d830-4331-a2b9-de0f6444bbf8 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:06:07.982: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-3998" for this suite. 
+Jun 20 10:06:13.996: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:06:14.066: INFO: namespace configmap-3998 deletion completed in 6.081055384s + +• [SLOW TEST:8.162 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:06:14.066: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:37 +STEP: Setting up data +[It] should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +STEP: Creating pod pod-subpath-test-secret-2czq +STEP: Creating a pod to test atomic-volume-subpath +Jun 20 10:06:14.121: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-2czq" in namespace "subpath-6696" to be "success or failure" +Jun 20 10:06:14.125: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Pending", Reason="", readiness=false. Elapsed: 3.799208ms +Jun 20 10:06:16.129: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Pending", Reason="", readiness=false. Elapsed: 2.008097392s +Jun 20 10:06:18.133: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Running", Reason="", readiness=true. Elapsed: 4.011689237s +Jun 20 10:06:20.136: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Running", Reason="", readiness=true. Elapsed: 6.015049662s +Jun 20 10:06:22.140: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Running", Reason="", readiness=true. Elapsed: 8.018701508s +Jun 20 10:06:24.143: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Running", Reason="", readiness=true. Elapsed: 10.021946658s +Jun 20 10:06:26.147: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Running", Reason="", readiness=true. Elapsed: 12.026141281s +Jun 20 10:06:28.151: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Running", Reason="", readiness=true. Elapsed: 14.02965151s +Jun 20 10:06:30.155: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Running", Reason="", readiness=true. Elapsed: 16.033606621s +Jun 20 10:06:32.158: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Running", Reason="", readiness=true. Elapsed: 18.036716125s +Jun 20 10:06:34.161: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Running", Reason="", readiness=true. 
Elapsed: 20.039922348s +Jun 20 10:06:36.164: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Running", Reason="", readiness=true. Elapsed: 22.042976532s +Jun 20 10:06:38.168: INFO: Pod "pod-subpath-test-secret-2czq": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.046761197s +STEP: Saw pod success +Jun 20 10:06:38.168: INFO: Pod "pod-subpath-test-secret-2czq" satisfied condition "success or failure" +Jun 20 10:06:38.171: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-subpath-test-secret-2czq container test-container-subpath-secret-2czq: +STEP: delete the pod +Jun 20 10:06:38.190: INFO: Waiting for pod pod-subpath-test-secret-2czq to disappear +Jun 20 10:06:38.192: INFO: Pod pod-subpath-test-secret-2czq no longer exists +STEP: Deleting pod pod-subpath-test-secret-2czq +Jun 20 10:06:38.192: INFO: Deleting pod "pod-subpath-test-secret-2czq" in namespace "subpath-6696" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151 +Jun 20 10:06:38.194: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-6696" for this suite. +Jun 20 10:06:44.207: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 20 10:06:44.279: INFO: namespace subpath-6696 deletion completed in 6.080968187s + +• [SLOW TEST:30.213 seconds] +[sig-storage] Subpath +/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:33 + should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +------------------------------ +SSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Proxy version v1 + should proxy logs on node with explicit kubelet port using proxy subresource [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +[BeforeEach] version v1 + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +STEP: Creating a kubernetes client +Jun 20 10:06:44.280: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618 +STEP: Building a namespace api object, basename proxy +STEP: Waiting for a default service account to be provisioned in namespace +[It] should proxy logs on node with explicit kubelet port using proxy subresource [Conformance] + /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697 +Jun 20 10:06:44.321: INFO: (0) /api/v1/nodes/ip-10-100-10-111.eu-west-1.compute.internal:10250/proxy/logs/:
+amazon/
+apt/
+auth.log
+[... truncated: the kubelet /logs/ proxy returned this listing as an HTML page for each of the 20 requests, (0) through (19); the markup was stripped on extraction, leaving only the entry names repeated 20 times. The tail of this proxy test and the header of the following EmptyDir test were lost in the same stripping, and the log resumes mid-line:]
+>>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir volume type on tmpfs
+Jun 20 10:06:50.509: INFO: Waiting up to 5m0s for pod "pod-a39f80b2-e163-4d7e-9b8e-c9ae1ac16c24" in namespace "emptydir-9837" to be "success or failure"
+Jun 20 10:06:50.513: INFO: Pod "pod-a39f80b2-e163-4d7e-9b8e-c9ae1ac16c24": Phase="Pending", Reason="", readiness=false. Elapsed: 4.537465ms
+Jun 20 10:06:52.517: INFO: Pod "pod-a39f80b2-e163-4d7e-9b8e-c9ae1ac16c24": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007967883s
+Jun 20 10:06:54.520: INFO: Pod "pod-a39f80b2-e163-4d7e-9b8e-c9ae1ac16c24": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.010984796s
+STEP: Saw pod success
+Jun 20 10:06:54.520: INFO: Pod "pod-a39f80b2-e163-4d7e-9b8e-c9ae1ac16c24" satisfied condition "success or failure"
+Jun 20 10:06:54.522: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-a39f80b2-e163-4d7e-9b8e-c9ae1ac16c24 container test-container: 
+STEP: delete the pod
+Jun 20 10:06:54.539: INFO: Waiting for pod pod-a39f80b2-e163-4d7e-9b8e-c9ae1ac16c24 to disappear
+Jun 20 10:06:54.543: INFO: Pod pod-a39f80b2-e163-4d7e-9b8e-c9ae1ac16c24 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:06:54.543: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-9837" for this suite.
+Jun 20 10:07:00.555: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:07:00.626: INFO: namespace emptydir-9837 deletion completed in 6.080179436s
+
+• [SLOW TEST:10.158 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
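+For reference, the test above mounts an emptyDir backed by tmpfs (medium: Memory) and checks the mount's mode. A minimal sketch of that kind of pod, with illustrative names and image rather than the suite's exact manifest:
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: emptydir-tmpfs-demo
+  spec:
+    restartPolicy: Never
+    containers:
+    - name: test-container
+      image: busybox:1.29
+      command: ["sh", "-c", "ls -ld /test-volume"]   # prints the volume mount's mode
+      volumeMounts:
+      - name: test-volume
+        mountPath: /test-volume
+    volumes:
+    - name: test-volume
+      emptyDir:
+        medium: Memory   # tmpfs-backed emptyDir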
+SSSSSSSSSSSS
+------------------------------
+[sig-network] DNS 
+  should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] DNS
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:07:00.626: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename dns
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-6030.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.dns-6030.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-6030.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-6030.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-6030.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-6030.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done
+
+STEP: creating a pod to probe /etc/hosts
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Jun 20 10:07:12.708: INFO: Unable to read jessie_udp@PodARecord from pod dns-6030/dns-test-847e7cb6-3dee-4662-b695-6429f9ebe31a: the server could not find the requested resource (get pods dns-test-847e7cb6-3dee-4662-b695-6429f9ebe31a)
+Jun 20 10:07:12.712: INFO: Unable to read jessie_tcp@PodARecord from pod dns-6030/dns-test-847e7cb6-3dee-4662-b695-6429f9ebe31a: the server could not find the requested resource (get pods dns-test-847e7cb6-3dee-4662-b695-6429f9ebe31a)
+Jun 20 10:07:12.712: INFO: Lookups using dns-6030/dns-test-847e7cb6-3dee-4662-b695-6429f9ebe31a failed for: [jessie_udp@PodARecord jessie_tcp@PodARecord]
+
+Jun 20 10:07:17.742: INFO: DNS probes using dns-6030/dns-test-847e7cb6-3dee-4662-b695-6429f9ebe31a succeeded
+
+STEP: deleting the pod
+[AfterEach] [sig-network] DNS
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:07:17.756: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "dns-6030" for this suite.
+Jun 20 10:07:23.779: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:07:23.843: INFO: namespace dns-6030 deletion completed in 6.082263284s
+
+• [SLOW TEST:23.217 seconds]
+[sig-network] DNS
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
+  should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
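+The probe loops above verify that the kubelet writes hostname and subdomain entries into each pod's /etc/hosts. A hedged sketch of a pod that makes those entries observable; names are illustrative, and a headless Service named dns-test-service is assumed to exist in the namespace:
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: hosts-probe-demo
+  spec:
+    restartPolicy: Never
+    hostname: dns-querier-1         # kubelet adds this name to /etc/hosts
+    subdomain: dns-test-service     # and the FQDN under this headless-service subdomain
+    containers:
+    - name: probe
+      image: busybox:1.29
+      command: ["sh", "-c", "grep dns-querier-1 /etc/hosts"]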
+SSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:07:23.844: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating the pod
+Jun 20 10:07:26.415: INFO: Successfully updated pod "annotationupdate390cbe6c-77ff-4a1f-a26d-9a917b24f7fa"
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:07:30.443: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-8589" for this suite.
+Jun 20 10:07:52.460: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:07:52.558: INFO: namespace downward-api-8589 deletion completed in 22.111808864s
+
+• [SLOW TEST:28.715 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
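+The test above relies on downwardAPI volume files being re-synced when pod metadata changes. A minimal sketch with an illustrative annotation key and image (the suite's own pod differs in detail):
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: annotationupdate-demo
+    annotations:
+      build: "one"    # later changed, e.g. kubectl annotate pod annotationupdate-demo build=two --overwrite
+  spec:
+    containers:
+    - name: client-container
+      image: busybox:1.29
+      command: ["sh", "-c", "while true; do cat /etc/podinfo/annotations; sleep 5; done"]
+      volumeMounts:
+      - name: podinfo
+        mountPath: /etc/podinfo
+    volumes:
+    - name: podinfo
+      downwardAPI:
+        items:
+        - path: annotations
+          fieldRef:
+            fieldPath: metadata.annotations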
+SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:07:52.559: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 10:07:52.621: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"b460df72-ecb4-4280-aa5d-d8f37c96d31e", Controller:(*bool)(0xc001920fb6), BlockOwnerDeletion:(*bool)(0xc001920fb7)}}
+Jun 20 10:07:52.638: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"90bf5387-a6fb-4cde-9ae9-67a99d4ec69d", Controller:(*bool)(0xc0029e747e), BlockOwnerDeletion:(*bool)(0xc0029e747f)}}
+Jun 20 10:07:52.650: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"fffc06f3-b435-4b9c-b934-426946abcbca", Controller:(*bool)(0xc0029e7616), BlockOwnerDeletion:(*bool)(0xc0029e7617)}}
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:07:57.660: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-4350" for this suite.
+Jun 20 10:08:03.673: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:08:03.741: INFO: namespace gc-4350 deletion completed in 6.078045054s
+
+• [SLOW TEST:11.182 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
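+The ownerReferences dumped above form the circle pod1 owned by pod3, pod2 owned by pod1, pod3 owned by pod2. A sketch of what one link looks like on pod1, reusing the pod3 UID from the log; the suite sets these references programmatically, and creating one by hand requires the owner's live UID:
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: pod1
+    ownerReferences:
+    - apiVersion: v1
+      kind: Pod
+      name: pod3
+      uid: b460df72-ecb4-4280-aa5d-d8f37c96d31e   # from the pod1 dump above
+      controller: true
+      blockOwnerDeletion: true
+  spec:
+    containers:
+    - name: pause
+      image: k8s.gcr.io/pause:3.1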
+SSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:08:03.741: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:08:03.783: INFO: Waiting up to 5m0s for pod "downwardapi-volume-2904ef27-190e-42d9-965c-3d8c4f176240" in namespace "projected-5187" to be "success or failure"
+Jun 20 10:08:03.786: INFO: Pod "downwardapi-volume-2904ef27-190e-42d9-965c-3d8c4f176240": Phase="Pending", Reason="", readiness=false. Elapsed: 3.243583ms
+Jun 20 10:08:05.790: INFO: Pod "downwardapi-volume-2904ef27-190e-42d9-965c-3d8c4f176240": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006567762s
+Jun 20 10:08:07.794: INFO: Pod "downwardapi-volume-2904ef27-190e-42d9-965c-3d8c4f176240": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.011075838s
+STEP: Saw pod success
+Jun 20 10:08:07.794: INFO: Pod "downwardapi-volume-2904ef27-190e-42d9-965c-3d8c4f176240" satisfied condition "success or failure"
+Jun 20 10:08:07.797: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-2904ef27-190e-42d9-965c-3d8c4f176240 container client-container: 
+STEP: delete the pod
+Jun 20 10:08:07.820: INFO: Waiting for pod downwardapi-volume-2904ef27-190e-42d9-965c-3d8c4f176240 to disappear
+Jun 20 10:08:07.825: INFO: Pod downwardapi-volume-2904ef27-190e-42d9-965c-3d8c4f176240 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:08:07.826: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-5187" for this suite.
+Jun 20 10:08:13.847: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:08:13.917: INFO: namespace projected-5187 deletion completed in 6.083374692s
+
+• [SLOW TEST:10.176 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
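+The test above checks that files in a projected downwardAPI volume are created with the volume's DefaultMode. A minimal sketch with an illustrative mode and names (not the suite's exact values):
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: projected-defaultmode-demo
+  spec:
+    restartPolicy: Never
+    containers:
+    - name: client-container
+      image: busybox:1.29
+      command: ["sh", "-c", "ls -l /etc/podinfo"]   # file mode should match defaultMode
+      volumeMounts:
+      - name: podinfo
+        mountPath: /etc/podinfo
+    volumes:
+    - name: podinfo
+      projected:
+        defaultMode: 0400   # octal
+        sources:
+        - downwardAPI:
+            items:
+            - path: podname
+              fieldRef:
+                fieldPath: metadata.name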
+SSSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should be able to override the image's default command and arguments [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:08:13.917: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename containers
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default command and arguments [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test override all
+Jun 20 10:08:13.956: INFO: Waiting up to 5m0s for pod "client-containers-4f169625-f723-4f1f-934a-1003b8012abf" in namespace "containers-6425" to be "success or failure"
+Jun 20 10:08:13.959: INFO: Pod "client-containers-4f169625-f723-4f1f-934a-1003b8012abf": Phase="Pending", Reason="", readiness=false. Elapsed: 2.397894ms
+Jun 20 10:08:15.962: INFO: Pod "client-containers-4f169625-f723-4f1f-934a-1003b8012abf": Phase="Pending", Reason="", readiness=false. Elapsed: 2.005879325s
+Jun 20 10:08:17.965: INFO: Pod "client-containers-4f169625-f723-4f1f-934a-1003b8012abf": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009145449s
+STEP: Saw pod success
+Jun 20 10:08:17.965: INFO: Pod "client-containers-4f169625-f723-4f1f-934a-1003b8012abf" satisfied condition "success or failure"
+Jun 20 10:08:17.968: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod client-containers-4f169625-f723-4f1f-934a-1003b8012abf container test-container: 
+STEP: delete the pod
+Jun 20 10:08:17.988: INFO: Waiting for pod client-containers-4f169625-f723-4f1f-934a-1003b8012abf to disappear
+Jun 20 10:08:17.990: INFO: Pod client-containers-4f169625-f723-4f1f-934a-1003b8012abf no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:08:17.990: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "containers-6425" for this suite.
+Jun 20 10:08:24.004: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:08:24.068: INFO: namespace containers-6425 deletion completed in 6.075071024s
+
+• [SLOW TEST:10.151 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should be able to override the image's default command and arguments [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
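+"Override all" here means setting both command (which replaces the image's ENTRYPOINT) and args (which replaces its CMD) in the container spec. A minimal sketch; image and strings are illustrative:
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: override-demo
+  spec:
+    restartPolicy: Never
+    containers:
+    - name: test-container
+      image: docker.io/library/busybox:1.29
+      command: ["/bin/echo"]              # replaces the image's ENTRYPOINT
+      args: ["override", "arguments"]     # replaces the image's CMD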
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicaSet 
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:08:24.069: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename replicaset
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 10:08:24.101: INFO: Creating ReplicaSet my-hostname-basic-8640cccd-746d-4820-8a4b-2fae2fa6d4e1
+Jun 20 10:08:24.111: INFO: Pod name my-hostname-basic-8640cccd-746d-4820-8a4b-2fae2fa6d4e1: Found 0 pods out of 1
+Jun 20 10:08:29.114: INFO: Pod name my-hostname-basic-8640cccd-746d-4820-8a4b-2fae2fa6d4e1: Found 1 pods out of 1
+Jun 20 10:08:29.114: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-8640cccd-746d-4820-8a4b-2fae2fa6d4e1" is running
+Jun 20 10:08:29.118: INFO: Pod "my-hostname-basic-8640cccd-746d-4820-8a4b-2fae2fa6d4e1-x4xsb" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-20 10:08:24 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-20 10:08:27 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-20 10:08:27 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-20 10:08:24 +0000 UTC Reason: Message:}])
+Jun 20 10:08:29.118: INFO: Trying to dial the pod
+Jun 20 10:08:34.130: INFO: Controller my-hostname-basic-8640cccd-746d-4820-8a4b-2fae2fa6d4e1: Got expected result from replica 1 [my-hostname-basic-8640cccd-746d-4820-8a4b-2fae2fa6d4e1-x4xsb]: "my-hostname-basic-8640cccd-746d-4820-8a4b-2fae2fa6d4e1-x4xsb", 1 of 1 required successes so far
+[AfterEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:08:34.130: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replicaset-4440" for this suite.
+Jun 20 10:08:40.143: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:08:40.207: INFO: namespace replicaset-4440 deletion completed in 6.074525889s
+
+• [SLOW TEST:16.139 seconds]
+[sig-apps] ReplicaSet
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
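+A sketch of the kind of ReplicaSet this test creates. The serve-hostname image name is an assumption (the usual e2e test image); the rest is illustrative:
+  apiVersion: apps/v1
+  kind: ReplicaSet
+  metadata:
+    name: my-hostname-basic
+  spec:
+    replicas: 1
+    selector:
+      matchLabels:
+        name: my-hostname-basic
+    template:
+      metadata:
+        labels:
+          name: my-hostname-basic
+      spec:
+        containers:
+        - name: my-hostname-basic
+          image: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1   # assumed image; replies with the pod's hostname
+          ports:
+          - containerPort: 9376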
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:08:40.208: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name configmap-test-volume-map-8a018b9a-b3e7-4bb7-b968-4ee8bae85e7b
+STEP: Creating a pod to test consume configMaps
+Jun 20 10:08:40.252: INFO: Waiting up to 5m0s for pod "pod-configmaps-af099253-0734-4589-8fa8-545c1fca6686" in namespace "configmap-8695" to be "success or failure"
+Jun 20 10:08:40.259: INFO: Pod "pod-configmaps-af099253-0734-4589-8fa8-545c1fca6686": Phase="Pending", Reason="", readiness=false. Elapsed: 6.597076ms
+Jun 20 10:08:42.265: INFO: Pod "pod-configmaps-af099253-0734-4589-8fa8-545c1fca6686": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013155933s
+Jun 20 10:08:44.268: INFO: Pod "pod-configmaps-af099253-0734-4589-8fa8-545c1fca6686": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016177581s
+STEP: Saw pod success
+Jun 20 10:08:44.268: INFO: Pod "pod-configmaps-af099253-0734-4589-8fa8-545c1fca6686" satisfied condition "success or failure"
+Jun 20 10:08:44.270: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-configmaps-af099253-0734-4589-8fa8-545c1fca6686 container configmap-volume-test: 
+STEP: delete the pod
+Jun 20 10:08:44.288: INFO: Waiting for pod pod-configmaps-af099253-0734-4589-8fa8-545c1fca6686 to disappear
+Jun 20 10:08:44.289: INFO: Pod pod-configmaps-af099253-0734-4589-8fa8-545c1fca6686 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:08:44.290: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-8695" for this suite.
+Jun 20 10:08:50.301: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:08:50.369: INFO: namespace configmap-8695 deletion completed in 6.077204296s
+
+• [SLOW TEST:10.162 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
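+"With mappings" means the configMap volume uses items: to expose selected keys under chosen file paths. A minimal sketch, assuming a ConfigMap named configmap-test-volume-map with a key data-1 already exists; other names are illustrative:
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: pod-configmaps-demo
+  spec:
+    restartPolicy: Never
+    containers:
+    - name: configmap-volume-test
+      image: busybox:1.29
+      command: ["cat", "/etc/configmap-volume/renamed-data"]
+      volumeMounts:
+      - name: configmap-volume
+        mountPath: /etc/configmap-volume
+    volumes:
+    - name: configmap-volume
+      configMap:
+        name: configmap-test-volume-map
+        items:
+        - key: data-1          # key in the ConfigMap...
+          path: renamed-data   # ...appears as this file in the volume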
+SSS
+------------------------------
+[sig-storage] Projected configMap 
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:08:50.370: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating projection with configMap that has name projected-configmap-test-upd-3f2ca22f-9029-45ac-9e53-b274a4d9d102
+STEP: Creating the pod
+STEP: Updating configmap projected-configmap-test-upd-3f2ca22f-9029-45ac-9e53-b274a4d9d102
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:08:54.451: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-3966" for this suite.
+Jun 20 10:09:16.464: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:09:16.539: INFO: namespace projected-3966 deletion completed in 22.085249958s
+
+• [SLOW TEST:26.170 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
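+The test above edits a ConfigMap and waits for the kubelet to re-project the mounted file. A minimal sketch, assuming a ConfigMap named projected-configmap-test-upd with a key data-1 exists; names and image are illustrative:
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: projected-upd-demo
+  spec:
+    containers:
+    - name: projected-configmap-volume-test
+      image: busybox:1.29
+      command: ["sh", "-c", "while true; do cat /etc/projected/data-1; sleep 2; done"]
+      volumeMounts:
+      - name: projected-volume
+        mountPath: /etc/projected
+    volumes:
+    - name: projected-volume
+      projected:
+        sources:
+        - configMap:
+            name: projected-configmap-test-upd   # editing this ConfigMap updates the file in place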
+SSSS
+------------------------------
+[sig-api-machinery] Secrets 
+  should fail to create secret due to empty secret key [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:09:16.539: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should fail to create secret due to empty secret key [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating projection with secret that has name secret-emptykey-test-e98982ca-33bf-441b-b43c-a8ef1817aa9a
+[AfterEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:09:16.573: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-46" for this suite.
+Jun 20 10:09:22.587: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:09:22.653: INFO: namespace secrets-46 deletion completed in 6.077180895s
+
+• [SLOW TEST:6.114 seconds]
+[sig-api-machinery] Secrets
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:31
+  should fail to create secret due to empty secret key [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
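+A sketch of the kind of Secret this test submits; the API server rejects it at validation because "" is not a valid data key (the value is an arbitrary base64 placeholder):
+  apiVersion: v1
+  kind: Secret
+  metadata:
+    name: secret-emptykey-test
+  data:
+    "": dmFsdWUtMQ==   # empty key -> create fails with an Invalid error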
+SS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:09:22.654: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir 0666 on node default medium
+Jun 20 10:09:22.693: INFO: Waiting up to 5m0s for pod "pod-7e624d51-0421-41c4-bdd1-ef43817152ea" in namespace "emptydir-2356" to be "success or failure"
+Jun 20 10:09:22.697: INFO: Pod "pod-7e624d51-0421-41c4-bdd1-ef43817152ea": Phase="Pending", Reason="", readiness=false. Elapsed: 4.452793ms
+Jun 20 10:09:24.701: INFO: Pod "pod-7e624d51-0421-41c4-bdd1-ef43817152ea": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007763082s
+Jun 20 10:09:26.704: INFO: Pod "pod-7e624d51-0421-41c4-bdd1-ef43817152ea": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.010947644s
+STEP: Saw pod success
+Jun 20 10:09:26.704: INFO: Pod "pod-7e624d51-0421-41c4-bdd1-ef43817152ea" satisfied condition "success or failure"
+Jun 20 10:09:26.706: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-7e624d51-0421-41c4-bdd1-ef43817152ea container test-container: 
+STEP: delete the pod
+Jun 20 10:09:26.722: INFO: Waiting for pod pod-7e624d51-0421-41c4-bdd1-ef43817152ea to disappear
+Jun 20 10:09:26.724: INFO: Pod pod-7e624d51-0421-41c4-bdd1-ef43817152ea no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:09:26.724: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-2356" for this suite.
+Jun 20 10:09:32.736: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:09:32.804: INFO: namespace emptydir-2356 deletion completed in 6.077023052s
+
+• [SLOW TEST:10.150 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
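+In these EmptyDir test names, (root,0666,default) reads as: run as root, expect file mode 0666, use the default (disk-backed) medium. A rough equivalent of what the suite's mount-test container does, with illustrative names and image:
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: emptydir-0666-demo
+  spec:
+    restartPolicy: Never
+    containers:
+    - name: test-container
+      image: busybox:1.29
+      command: ["sh", "-c", "touch /test-volume/f && chmod 0666 /test-volume/f && ls -l /test-volume/f"]
+      volumeMounts:
+      - name: test-volume
+        mountPath: /test-volume
+    volumes:
+    - name: test-volume
+      emptyDir: {}   # default medium: node disk, not tmpfs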
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:09:32.804: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename sched-pred
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:81
+Jun 20 10:09:32.837: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+Jun 20 10:09:32.842: INFO: Waiting for terminating namespaces to be deleted...
+Jun 20 10:09:32.845: INFO: 
+Logging pods the kubelet thinks are on node ip-10-100-10-111.eu-west-1.compute.internal before test
+Jun 20 10:09:32.849: INFO: kube-proxy-9j68g from kube-system started at 2019-06-20 09:12:47 +0000 UTC (1 container status recorded)
+Jun 20 10:09:32.849: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 20 10:09:32.849: INFO: weave-net-nh2zg from kube-system started at 2019-06-20 09:35:33 +0000 UTC (2 container statuses recorded)
+Jun 20 10:09:32.849: INFO: 	Container weave ready: true, restart count 0
+Jun 20 10:09:32.849: INFO: 	Container weave-npc ready: true, restart count 0
+Jun 20 10:09:32.849: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-20 09:59:16 +0000 UTC (1 container status recorded)
+Jun 20 10:09:32.849: INFO: 	Container kube-sonobuoy ready: true, restart count 0
+Jun 20 10:09:32.849: INFO: sonobuoy-systemd-logs-daemon-set-de69c0149d564b9d-fz4v7 from heptio-sonobuoy started at 2019-06-20 09:59:22 +0000 UTC (2 container statuses recorded)
+Jun 20 10:09:32.849: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun 20 10:09:32.849: INFO: 	Container systemd-logs ready: true, restart count 0
+Jun 20 10:09:32.849: INFO: 
+Logging pods the kubelet thinks are on node ip-10-100-12-226.eu-west-1.compute.internal before test
+Jun 20 10:09:32.855: INFO: weave-net-9x9dh from kube-system started at 2019-06-20 09:35:33 +0000 UTC (2 container statuses recorded)
+Jun 20 10:09:32.855: INFO: 	Container weave ready: true, restart count 0
+Jun 20 10:09:32.855: INFO: 	Container weave-npc ready: true, restart count 0
+Jun 20 10:09:32.855: INFO: sonobuoy-e2e-job-dca131905d74464b from heptio-sonobuoy started at 2019-06-20 09:59:22 +0000 UTC (2 container statuses recorded)
+Jun 20 10:09:32.855: INFO: 	Container e2e ready: true, restart count 0
+Jun 20 10:09:32.855: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun 20 10:09:32.855: INFO: sonobuoy-systemd-logs-daemon-set-de69c0149d564b9d-m4zhg from heptio-sonobuoy started at 2019-06-20 09:59:22 +0000 UTC (2 container statuses recorded)
+Jun 20 10:09:32.855: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun 20 10:09:32.855: INFO: 	Container systemd-logs ready: true, restart count 0
+Jun 20 10:09:32.855: INFO: kube-proxy-4c7sq from kube-system started at 2019-06-20 09:12:47 +0000 UTC (1 container status recorded)
+Jun 20 10:09:32.855: INFO: 	Container kube-proxy ready: true, restart count 0
+[It] validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: verifying the node has the label node ip-10-100-10-111.eu-west-1.compute.internal
+STEP: verifying the node has the label node ip-10-100-12-226.eu-west-1.compute.internal
+Jun 20 10:09:32.889: INFO: Pod sonobuoy requesting resource cpu=0m on Node ip-10-100-10-111.eu-west-1.compute.internal
+Jun 20 10:09:32.889: INFO: Pod sonobuoy-e2e-job-dca131905d74464b requesting resource cpu=0m on Node ip-10-100-12-226.eu-west-1.compute.internal
+Jun 20 10:09:32.889: INFO: Pod sonobuoy-systemd-logs-daemon-set-de69c0149d564b9d-fz4v7 requesting resource cpu=0m on Node ip-10-100-10-111.eu-west-1.compute.internal
+Jun 20 10:09:32.889: INFO: Pod sonobuoy-systemd-logs-daemon-set-de69c0149d564b9d-m4zhg requesting resource cpu=0m on Node ip-10-100-12-226.eu-west-1.compute.internal
+Jun 20 10:09:32.889: INFO: Pod kube-proxy-4c7sq requesting resource cpu=0m on Node ip-10-100-12-226.eu-west-1.compute.internal
+Jun 20 10:09:32.889: INFO: Pod kube-proxy-9j68g requesting resource cpu=0m on Node ip-10-100-10-111.eu-west-1.compute.internal
+Jun 20 10:09:32.889: INFO: Pod weave-net-9x9dh requesting resource cpu=20m on Node ip-10-100-12-226.eu-west-1.compute.internal
+Jun 20 10:09:32.889: INFO: Pod weave-net-nh2zg requesting resource cpu=20m on Node ip-10-100-10-111.eu-west-1.compute.internal
+STEP: Starting Pods to consume most of the cluster CPU.
+STEP: Creating another pod that requires unavailable amount of CPU.
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-1be46266-974b-4168-a88d-cdf25262446c.15a9e04e55e4ca89], Reason = [Scheduled], Message = [Successfully assigned sched-pred-1995/filler-pod-1be46266-974b-4168-a88d-cdf25262446c to ip-10-100-12-226.eu-west-1.compute.internal]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-1be46266-974b-4168-a88d-cdf25262446c.15a9e04e93764147], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.1" already present on machine]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-1be46266-974b-4168-a88d-cdf25262446c.15a9e04e97d2504b], Reason = [Created], Message = [Created container filler-pod-1be46266-974b-4168-a88d-cdf25262446c]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-1be46266-974b-4168-a88d-cdf25262446c.15a9e04ea49cc6b1], Reason = [Started], Message = [Started container filler-pod-1be46266-974b-4168-a88d-cdf25262446c]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-fc6d7724-21c7-4ee1-964f-27da8e1cd0fe.15a9e04e555d265f], Reason = [Scheduled], Message = [Successfully assigned sched-pred-1995/filler-pod-fc6d7724-21c7-4ee1-964f-27da8e1cd0fe to ip-10-100-10-111.eu-west-1.compute.internal]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-fc6d7724-21c7-4ee1-964f-27da8e1cd0fe.15a9e04e90ee967e], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.1" already present on machine]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-fc6d7724-21c7-4ee1-964f-27da8e1cd0fe.15a9e04e94ee7545], Reason = [Created], Message = [Created container filler-pod-fc6d7724-21c7-4ee1-964f-27da8e1cd0fe]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-fc6d7724-21c7-4ee1-964f-27da8e1cd0fe.15a9e04e9ccc01a3], Reason = [Started], Message = [Started container filler-pod-fc6d7724-21c7-4ee1-964f-27da8e1cd0fe]
+STEP: Considering event: 
+Type = [Warning], Name = [additional-pod.15a9e04f4510a7dd], Reason = [FailedScheduling], Message = [0/5 nodes are available: 2 Insufficient cpu, 3 node(s) had taints that the pod didn't tolerate.]
+STEP: removing the label node off the node ip-10-100-10-111.eu-west-1.compute.internal
+STEP: verifying the node doesn't have the label node
+STEP: removing the label node off the node ip-10-100-12-226.eu-west-1.compute.internal
+STEP: verifying the node doesn't have the label node
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:09:37.951: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "sched-pred-1995" for this suite.
+Jun 20 10:09:43.962: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:09:44.026: INFO: namespace sched-pred-1995 deletion completed in 6.072014491s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:72
+
+• [SLOW TEST:11.222 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:23
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
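+
+The FailedScheduling event above is easy to reproduce by hand: once filler pods hold most of the allocatable CPU, any pod with an unsatisfiable request stays Pending. A sketch, where the pod name and the oversized 10-CPU request are illustrative values:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: additional-pod-demo   # hypothetical
+spec:
+  containers:
+  - name: pause
+    image: k8s.gcr.io/pause:3.1
+    resources:
+      requests:
+        cpu: "10"   # more CPU than any schedulable node has allocatable
+# kubectl describe pod additional-pod-demo then shows a FailedScheduling event
+# of the same shape: "0/N nodes are available: Insufficient cpu ...".
+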
+[sig-apps] ReplicaSet 
+  should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:09:44.026: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename replicaset
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Given a Pod with a 'name' label pod-adoption-release is created
+STEP: When a replicaset with a matching selector is created
+STEP: Then the orphan pod is adopted
+STEP: When the matched label of one of its pods change
+Jun 20 10:09:49.098: INFO: Pod name pod-adoption-release: Found 1 pod out of 1
+STEP: Then the pod is released
+[AfterEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:09:50.114: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replicaset-3495" for this suite.
+Jun 20 10:10:12.128: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:10:12.219: INFO: namespace replicaset-3495 deletion completed in 22.1023665s
+
+• [SLOW TEST:28.194 seconds]
+[sig-apps] ReplicaSet
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
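+
+Adoption and release in this test are pure label-selector mechanics. A sketch of the ReplicaSet involved; the name mirrors the log, the container image is an assumption:
+
+apiVersion: apps/v1
+kind: ReplicaSet
+metadata:
+  name: pod-adoption-release
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: pod-adoption-release   # a pre-existing bare pod with this label gets adopted
+  template:
+    metadata:
+      labels:
+        name: pod-adoption-release
+    spec:
+      containers:
+      - name: app
+        image: k8s.gcr.io/pause:3.1
+# Changing the label on the adopted pod (say, to name: released) takes it out of
+# the selector; the ReplicaSet drops its ownerReference and creates a replacement,
+# which is the "Then the pod is released" step above.
+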
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:10:12.222: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name projected-configmap-test-volume-e2cbdd61-e298-4c96-9dfa-4512bd6315f6
+STEP: Creating a pod to test consume configMaps
+Jun 20 10:10:12.276: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-d19b5089-e07d-4d7d-a60d-17a5a2d7be41" in namespace "projected-5530" to be "success or failure"
+Jun 20 10:10:12.282: INFO: Pod "pod-projected-configmaps-d19b5089-e07d-4d7d-a60d-17a5a2d7be41": Phase="Pending", Reason="", readiness=false. Elapsed: 5.749911ms
+Jun 20 10:10:14.285: INFO: Pod "pod-projected-configmaps-d19b5089-e07d-4d7d-a60d-17a5a2d7be41": Phase="Running", Reason="", readiness=true. Elapsed: 2.008993667s
+Jun 20 10:10:16.288: INFO: Pod "pod-projected-configmaps-d19b5089-e07d-4d7d-a60d-17a5a2d7be41": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012261555s
+STEP: Saw pod success
+Jun 20 10:10:16.288: INFO: Pod "pod-projected-configmaps-d19b5089-e07d-4d7d-a60d-17a5a2d7be41" satisfied condition "success or failure"
+Jun 20 10:10:16.291: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-configmaps-d19b5089-e07d-4d7d-a60d-17a5a2d7be41 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 20 10:10:16.310: INFO: Waiting for pod pod-projected-configmaps-d19b5089-e07d-4d7d-a60d-17a5a2d7be41 to disappear
+Jun 20 10:10:16.312: INFO: Pod pod-projected-configmaps-d19b5089-e07d-4d7d-a60d-17a5a2d7be41 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:10:16.313: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-5530" for this suite.
+Jun 20 10:10:22.324: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:10:22.389: INFO: namespace projected-5530 deletion completed in 6.073684033s
+
+• [SLOW TEST:10.167 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
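+
+The non-root twist in this test is a pod-level securityContext in front of an ordinary projected configMap volume. A sketch; the ConfigMap name, key, and UID are assumptions:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: projected-configmap-nonroot-demo   # hypothetical
+spec:
+  securityContext:
+    runAsUser: 1000   # read the volume as a non-root UID
+  restartPolicy: Never
+  containers:
+  - name: projected-configmap-volume-test
+    image: busybox
+    command: ["cat", "/etc/projected/data-1"]
+    volumeMounts:
+    - name: cfg
+      mountPath: /etc/projected
+  volumes:
+  - name: cfg
+    projected:
+      sources:
+      - configMap:
+          name: demo-config   # assumed to carry a data-1 key
+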
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:10:22.389: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:10:22.429: INFO: Waiting up to 5m0s for pod "downwardapi-volume-09713fd3-421e-41f4-812c-2dc98318505c" in namespace "projected-2445" to be "success or failure"
+Jun 20 10:10:22.432: INFO: Pod "downwardapi-volume-09713fd3-421e-41f4-812c-2dc98318505c": Phase="Pending", Reason="", readiness=false. Elapsed: 3.324023ms
+Jun 20 10:10:24.436: INFO: Pod "downwardapi-volume-09713fd3-421e-41f4-812c-2dc98318505c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006616664s
+Jun 20 10:10:26.439: INFO: Pod "downwardapi-volume-09713fd3-421e-41f4-812c-2dc98318505c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009891107s
+STEP: Saw pod success
+Jun 20 10:10:26.439: INFO: Pod "downwardapi-volume-09713fd3-421e-41f4-812c-2dc98318505c" satisfied condition "success or failure"
+Jun 20 10:10:26.441: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-09713fd3-421e-41f4-812c-2dc98318505c container client-container: 
+STEP: delete the pod
+Jun 20 10:10:26.459: INFO: Waiting for pod downwardapi-volume-09713fd3-421e-41f4-812c-2dc98318505c to disappear
+Jun 20 10:10:26.462: INFO: Pod downwardapi-volume-09713fd3-421e-41f4-812c-2dc98318505c no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:10:26.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-2445" for this suite.
+Jun 20 10:10:32.479: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:10:32.545: INFO: namespace projected-2445 deletion completed in 6.080383399s
+
+• [SLOW TEST:10.155 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
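+
+What this test exercises is a downwardAPI volume item with resourceFieldRef limits.cpu on a container that sets no CPU limit, so the kubelet falls back to the node's allocatable CPU. A minimal sketch, with names and paths assumed:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downwardapi-cpu-demo   # hypothetical
+spec:
+  restartPolicy: Never
+  containers:
+  - name: client-container
+    image: busybox
+    command: ["cat", "/etc/podinfo/cpu_limit"]
+    # No resources.limits.cpu is set here, so cpu_limit surfaces the node's
+    # allocatable CPU, which is the default this test asserts.
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    downwardAPI:
+      items:
+      - path: cpu_limit
+        resourceFieldRef:
+          containerName: client-container
+          resource: limits.cpu
+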
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:10:32.545: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating secret with name secret-test-map-46d6cb2a-be55-4c4f-8e37-1766ccf7df01
+STEP: Creating a pod to test consume secrets
+Jun 20 10:10:32.589: INFO: Waiting up to 5m0s for pod "pod-secrets-4688a25d-cffd-47b4-a365-d62c315cbd24" in namespace "secrets-7365" to be "success or failure"
+Jun 20 10:10:32.592: INFO: Pod "pod-secrets-4688a25d-cffd-47b4-a365-d62c315cbd24": Phase="Pending", Reason="", readiness=false. Elapsed: 3.042857ms
+Jun 20 10:10:34.595: INFO: Pod "pod-secrets-4688a25d-cffd-47b4-a365-d62c315cbd24": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006138826s
+Jun 20 10:10:36.599: INFO: Pod "pod-secrets-4688a25d-cffd-47b4-a365-d62c315cbd24": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009287343s
+STEP: Saw pod success
+Jun 20 10:10:36.599: INFO: Pod "pod-secrets-4688a25d-cffd-47b4-a365-d62c315cbd24" satisfied condition "success or failure"
+Jun 20 10:10:36.601: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-secrets-4688a25d-cffd-47b4-a365-d62c315cbd24 container secret-volume-test: 
+STEP: delete the pod
+Jun 20 10:10:36.618: INFO: Waiting for pod pod-secrets-4688a25d-cffd-47b4-a365-d62c315cbd24 to disappear
+Jun 20 10:10:36.621: INFO: Pod pod-secrets-4688a25d-cffd-47b4-a365-d62c315cbd24 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:10:36.621: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-7365" for this suite.
+Jun 20 10:10:42.634: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:10:42.699: INFO: namespace secrets-7365 deletion completed in 6.075037589s
+
+• [SLOW TEST:10.154 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
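+
+"Mappings" in this test's name refers to the items field of the secret volume, which remounts a chosen key under a chosen path. A sketch; the Secret name and key are assumptions:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: secret-mapping-demo   # hypothetical
+spec:
+  restartPolicy: Never
+  containers:
+  - name: secret-volume-test
+    image: busybox
+    command: ["cat", "/etc/secret-volume/new-path-data-1"]
+    volumeMounts:
+    - name: secret-volume
+      mountPath: /etc/secret-volume
+      readOnly: true
+  volumes:
+  - name: secret-volume
+    secret:
+      secretName: demo-secret   # assumed Secret containing a data-1 key
+      items:
+      - key: data-1
+        path: new-path-data-1   # the mapping: key data-1 appears under this path
+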
+S
+------------------------------
+[k8s.io] Pods 
+  should be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:10:42.699: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:164
+[It] should be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: updating the pod
+Jun 20 10:10:47.262: INFO: Successfully updated pod "pod-update-481f1210-8398-488b-b9d7-08bbbd686871"
+STEP: verifying the updated pod is in kubernetes
+Jun 20 10:10:47.268: INFO: Pod update OK
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:10:47.268: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-140" for this suite.
+Jun 20 10:11:09.280: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:11:09.352: INFO: namespace pods-140 deletion completed in 22.081283035s
+
+• [SLOW TEST:26.653 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
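+
+The "updating the pod" step is a plain metadata mutation on a live pod. With kubectl the same effect looks like the following; the pod name and label are illustrative:
+
+# One-liner form (the same shape the suite itself uses for kubectl patch later on):
+#   kubectl patch pod pod-update-demo -p '{"metadata":{"labels":{"time":"updated"}}}'
+# The patch body, written as YAML:
+metadata:
+  labels:
+    time: updated   # mutating labels on a running pod is permitted
+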
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:11:09.353: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name projected-configmap-test-volume-map-ec2c8b0e-2b4a-4cc2-abb1-45ff4720283e
+STEP: Creating a pod to test consume configMaps
+Jun 20 10:11:09.397: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-d193f668-5a9e-4f2d-bf8b-5b1fc56c4bc7" in namespace "projected-1257" to be "success or failure"
+Jun 20 10:11:09.404: INFO: Pod "pod-projected-configmaps-d193f668-5a9e-4f2d-bf8b-5b1fc56c4bc7": Phase="Pending", Reason="", readiness=false. Elapsed: 6.56158ms
+Jun 20 10:11:11.407: INFO: Pod "pod-projected-configmaps-d193f668-5a9e-4f2d-bf8b-5b1fc56c4bc7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009605758s
+STEP: Saw pod success
+Jun 20 10:11:11.407: INFO: Pod "pod-projected-configmaps-d193f668-5a9e-4f2d-bf8b-5b1fc56c4bc7" satisfied condition "success or failure"
+Jun 20 10:11:11.409: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-configmaps-d193f668-5a9e-4f2d-bf8b-5b1fc56c4bc7 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 20 10:11:11.425: INFO: Waiting for pod pod-projected-configmaps-d193f668-5a9e-4f2d-bf8b-5b1fc56c4bc7 to disappear
+Jun 20 10:11:11.428: INFO: Pod pod-projected-configmaps-d193f668-5a9e-4f2d-bf8b-5b1fc56c4bc7 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:11:11.428: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-1257" for this suite.
+Jun 20 10:11:17.441: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:11:17.515: INFO: namespace projected-1257 deletion completed in 6.08369595s
+
+• [SLOW TEST:8.162 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:11:17.515: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir 0666 on tmpfs
+Jun 20 10:11:17.553: INFO: Waiting up to 5m0s for pod "pod-abb41195-b659-4f77-ade9-5ee4e0a0dbcf" in namespace "emptydir-5943" to be "success or failure"
+Jun 20 10:11:17.559: INFO: Pod "pod-abb41195-b659-4f77-ade9-5ee4e0a0dbcf": Phase="Pending", Reason="", readiness=false. Elapsed: 5.335172ms
+Jun 20 10:11:19.562: INFO: Pod "pod-abb41195-b659-4f77-ade9-5ee4e0a0dbcf": Phase="Pending", Reason="", readiness=false. Elapsed: 2.008661484s
+Jun 20 10:11:21.565: INFO: Pod "pod-abb41195-b659-4f77-ade9-5ee4e0a0dbcf": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.011833573s
+STEP: Saw pod success
+Jun 20 10:11:21.565: INFO: Pod "pod-abb41195-b659-4f77-ade9-5ee4e0a0dbcf" satisfied condition "success or failure"
+Jun 20 10:11:21.567: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-abb41195-b659-4f77-ade9-5ee4e0a0dbcf container test-container: 
+STEP: delete the pod
+Jun 20 10:11:21.584: INFO: Waiting for pod pod-abb41195-b659-4f77-ade9-5ee4e0a0dbcf to disappear
+Jun 20 10:11:21.586: INFO: Pod pod-abb41195-b659-4f77-ade9-5ee4e0a0dbcf no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:11:21.587: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-5943" for this suite.
+Jun 20 10:11:27.599: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:11:27.666: INFO: namespace emptydir-5943 deletion completed in 6.076781564s
+
+• [SLOW TEST:10.151 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
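+
+The tmpfs variant differs from the default-medium cases only in the volume stanza. A sketch that also makes the backing filesystem visible; names are assumptions:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: emptydir-tmpfs-demo   # hypothetical
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test-container
+    image: busybox
+    # Seeing "tmpfs" in the mount table confirms the RAM-backed medium.
+    command: ["sh", "-c", "mount | grep /mnt/test"]
+    volumeMounts:
+    - name: scratch
+      mountPath: /mnt/test
+  volumes:
+  - name: scratch
+    emptyDir:
+      medium: Memory   # tmpfs; usage counts against the container's memory
+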
+SS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command in a pod 
+  should print the output to logs [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:11:27.666: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should print the output to logs [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:11:29.745: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-6605" for this suite.
+Jun 20 10:12:21.760: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:12:21.837: INFO: namespace kubelet-test-6605 deletion completed in 52.088359469s
+
+• [SLOW TEST:54.171 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  when scheduling a busybox command in a pod
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:40
+    should print the output to logs [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
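+
+The whole test amounts to: run a busybox command, then read its output back through the kubelet's log endpoint. A sketch with assumed names:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox-logs-demo   # hypothetical
+spec:
+  restartPolicy: Never
+  containers:
+  - name: busybox
+    image: busybox
+    command: ["sh", "-c", "echo 'hello from the busybox pod'"]
+# Once the container has run:
+#   kubectl logs busybox-logs-demo   # prints: hello from the busybox pod
+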
+S
+------------------------------
+[k8s.io] Pods 
+  should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:12:21.837: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:164
+[It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: updating the pod
+Jun 20 10:12:26.403: INFO: Successfully updated pod "pod-update-activedeadlineseconds-7345890e-d27c-4ead-9926-1d678bf1583d"
+Jun 20 10:12:26.403: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-7345890e-d27c-4ead-9926-1d678bf1583d" in namespace "pods-3411" to be "terminated due to deadline exceeded"
+Jun 20 10:12:26.405: INFO: Pod "pod-update-activedeadlineseconds-7345890e-d27c-4ead-9926-1d678bf1583d": Phase="Running", Reason="", readiness=true. Elapsed: 2.888361ms
+Jun 20 10:12:28.411: INFO: Pod "pod-update-activedeadlineseconds-7345890e-d27c-4ead-9926-1d678bf1583d": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 2.008404988s
+Jun 20 10:12:28.411: INFO: Pod "pod-update-activedeadlineseconds-7345890e-d27c-4ead-9926-1d678bf1583d" satisfied condition "terminated due to deadline exceeded"
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:12:28.411: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-3411" for this suite.
+Jun 20 10:12:34.423: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:12:34.491: INFO: namespace pods-3411 deletion completed in 6.077312555s
+
+• [SLOW TEST:12.655 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
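+
+The Failed/DeadlineExceeded transition above comes from tightening spec.activeDeadlineSeconds on a running pod, one of the few pod spec fields that may be updated in place. A sketch with illustrative values:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: deadline-demo   # hypothetical
+spec:
+  activeDeadlineSeconds: 600   # generous initial deadline
+  restartPolicy: Never
+  containers:
+  - name: sleeper
+    image: busybox
+    command: ["sleep", "3600"]
+# Lowering the deadline on the live pod:
+#   kubectl patch pod deadline-demo -p '{"spec":{"activeDeadlineSeconds":5}}'
+# soon yields Phase=Failed, Reason=DeadlineExceeded, as logged above.
+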
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:12:34.492: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:12:34.533: INFO: Waiting up to 5m0s for pod "downwardapi-volume-f3a16227-c4db-4e84-9a22-19f72e4d5973" in namespace "projected-9146" to be "success or failure"
+Jun 20 10:12:34.537: INFO: Pod "downwardapi-volume-f3a16227-c4db-4e84-9a22-19f72e4d5973": Phase="Pending", Reason="", readiness=false. Elapsed: 4.637046ms
+Jun 20 10:12:36.541: INFO: Pod "downwardapi-volume-f3a16227-c4db-4e84-9a22-19f72e4d5973": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007961424s
+STEP: Saw pod success
+Jun 20 10:12:36.541: INFO: Pod "downwardapi-volume-f3a16227-c4db-4e84-9a22-19f72e4d5973" satisfied condition "success or failure"
+Jun 20 10:12:36.543: INFO: Trying to get logs from node ip-10-100-12-226.eu-west-1.compute.internal pod downwardapi-volume-f3a16227-c4db-4e84-9a22-19f72e4d5973 container client-container: 
+STEP: delete the pod
+Jun 20 10:12:36.564: INFO: Waiting for pod downwardapi-volume-f3a16227-c4db-4e84-9a22-19f72e4d5973 to disappear
+Jun 20 10:12:36.566: INFO: Pod downwardapi-volume-f3a16227-c4db-4e84-9a22-19f72e4d5973 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:12:36.567: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-9146" for this suite.
+Jun 20 10:12:42.582: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:12:42.680: INFO: namespace projected-9146 deletion completed in 6.110433965s
+
+• [SLOW TEST:8.188 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute prestop exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:12:42.680: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:63
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute prestop exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the pod with lifecycle hook
+STEP: delete the pod with lifecycle hook
+Jun 20 10:12:50.784: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:12:50.787: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:12:52.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:12:52.790: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:12:54.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:12:54.790: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:12:56.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:12:56.790: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:12:58.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:12:58.790: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:13:00.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:13:00.791: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:13:02.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:13:02.790: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:13:04.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:13:04.790: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:13:06.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:13:06.790: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:13:08.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:13:08.790: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:13:10.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:13:10.791: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:13:12.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:13:12.790: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:13:14.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:13:14.790: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:13:16.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:13:16.790: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun 20 10:13:18.787: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun 20 10:13:18.790: INFO: Pod pod-with-prestop-exec-hook no longer exists
+STEP: check prestop hook
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:13:18.804: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-lifecycle-hook-2698" for this suite.
+Jun 20 10:13:40.816: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:13:40.902: INFO: namespace container-lifecycle-hook-2698 deletion completed in 22.095749499s
+
+• [SLOW TEST:58.222 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:42
+    should execute prestop exec hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
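+
+This test and the poststart one that follows both hang a shell command off the container lifecycle; one sketch covers both hooks (pod name and commands are illustrative):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: lifecycle-hook-demo   # hypothetical
+spec:
+  containers:
+  - name: app
+    image: busybox
+    command: ["sleep", "3600"]
+    lifecycle:
+      postStart:
+        exec:
+          command: ["sh", "-c", "echo started > /tmp/poststart"]
+      preStop:
+        exec:
+          # Runs on deletion; the long "still exists" polling above is the
+          # suite waiting out graceful termination while preStop executes.
+          command: ["sh", "-c", "echo stopping; sleep 5"]
+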
+SSSSSSSSSSS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute poststart exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:13:40.903: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:63
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute poststart exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the pod with lifecycle hook
+STEP: check poststart hook
+STEP: delete the pod with lifecycle hook
+Jun 20 10:13:48.976: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 20 10:13:48.980: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 20 10:13:50.980: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 20 10:13:50.984: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 20 10:13:52.980: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 20 10:13:52.984: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 20 10:13:54.980: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 20 10:13:54.983: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 20 10:13:56.980: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 20 10:13:56.983: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 20 10:13:58.980: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 20 10:13:58.983: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 20 10:14:00.980: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 20 10:14:00.983: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 20 10:14:02.980: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 20 10:14:02.983: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 20 10:14:04.980: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 20 10:14:04.983: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun 20 10:14:06.980: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun 20 10:14:06.983: INFO: Pod pod-with-poststart-exec-hook no longer exists
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:14:06.983: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-lifecycle-hook-8097" for this suite.
+Jun 20 10:14:29.000: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:14:29.073: INFO: namespace container-lifecycle-hook-8097 deletion completed in 22.08568228s
+
+• [SLOW TEST:48.170 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:42
+    should execute poststart exec hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicationController 
+  should surface a failure condition on a common issue like exceeded quota [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:14:29.073: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename replication-controller
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should surface a failure condition on a common issue like exceeded quota [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 10:14:29.106: INFO: Creating quota "condition-test" that allows only two pods to run in the current namespace
+STEP: Creating rc "condition-test" that asks for more than the allowed pod quota
+STEP: Checking rc "condition-test" has the desired failure condition set
+STEP: Scaling down rc "condition-test" to satisfy pod quota
+Jun 20 10:14:31.138: INFO: Updating replication controller "condition-test"
+STEP: Checking rc "condition-test" has no failure condition set
+[AfterEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:14:32.144: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replication-controller-7989" for this suite.
+Jun 20 10:14:38.158: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:14:38.227: INFO: namespace replication-controller-7989 deletion completed in 6.080533599s
+
+• [SLOW TEST:9.154 seconds]
+[sig-apps] ReplicationController
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should surface a failure condition on a common issue like exceeded quota [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
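+
+The failure condition comes from a ResourceQuota capping the namespace at two pods while the controller asks for more. A sketch of both objects; the names mirror the log, the image is an assumption:
+
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: condition-test
+spec:
+  hard:
+    pods: "2"   # at most two pods may run in the namespace
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: condition-test
+spec:
+  replicas: 3   # one pod more than the quota allows
+  selector:
+    name: condition-test
+  template:
+    metadata:
+      labels:
+        name: condition-test
+    spec:
+      containers:
+      - name: app
+        image: k8s.gcr.io/pause:3.1
+# The RC carries a ReplicaFailure condition until it is scaled down to 2,
+# matching the check/scale/re-check sequence above.
+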
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:14:38.228: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir 0666 on node default medium
+Jun 20 10:14:38.276: INFO: Waiting up to 5m0s for pod "pod-7b4733d4-dc45-4d62-a9f6-7e2de801691f" in namespace "emptydir-3723" to be "success or failure"
+Jun 20 10:14:38.283: INFO: Pod "pod-7b4733d4-dc45-4d62-a9f6-7e2de801691f": Phase="Pending", Reason="", readiness=false. Elapsed: 6.548381ms
+Jun 20 10:14:40.286: INFO: Pod "pod-7b4733d4-dc45-4d62-a9f6-7e2de801691f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009765074s
+STEP: Saw pod success
+Jun 20 10:14:40.286: INFO: Pod "pod-7b4733d4-dc45-4d62-a9f6-7e2de801691f" satisfied condition "success or failure"
+Jun 20 10:14:40.288: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-7b4733d4-dc45-4d62-a9f6-7e2de801691f container test-container: 
+STEP: delete the pod
+Jun 20 10:14:40.306: INFO: Waiting for pod pod-7b4733d4-dc45-4d62-a9f6-7e2de801691f to disappear
+Jun 20 10:14:40.309: INFO: Pod pod-7b4733d4-dc45-4d62-a9f6-7e2de801691f no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:14:40.309: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-3723" for this suite.
+Jun 20 10:14:46.322: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:14:46.392: INFO: namespace emptydir-3723 deletion completed in 6.080731767s
+
+• [SLOW TEST:8.164 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl patch 
+  should add annotations for pods in rc  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:14:46.393: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[It] should add annotations for pods in rc  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating Redis RC
+Jun 20 10:14:46.427: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-2788'
+Jun 20 10:14:46.693: INFO: stderr: ""
+Jun 20 10:14:46.693: INFO: stdout: "replicationcontroller/redis-master created\n"
+STEP: Waiting for Redis master to start.
+Jun 20 10:14:47.696: INFO: Selector matched 1 pod for map[app:redis]
+Jun 20 10:14:47.696: INFO: Found 0 / 1
+Jun 20 10:14:48.697: INFO: Selector matched 1 pod for map[app:redis]
+Jun 20 10:14:48.697: INFO: Found 0 / 1
+Jun 20 10:14:49.696: INFO: Selector matched 1 pod for map[app:redis]
+Jun 20 10:14:49.696: INFO: Found 0 / 1
+Jun 20 10:14:50.696: INFO: Selector matched 1 pod for map[app:redis]
+Jun 20 10:14:50.696: INFO: Found 1 / 1
+Jun 20 10:14:50.696: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+STEP: patching all pods
+Jun 20 10:14:50.699: INFO: Selector matched 1 pod for map[app:redis]
+Jun 20 10:14:50.699: INFO: ForEach: Found 1 pod from the filter.  Now looping through them.
+Jun 20 10:14:50.699: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 patch pod redis-master-psk77 --namespace=kubectl-2788 -p {"metadata":{"annotations":{"x":"y"}}}'
+Jun 20 10:14:50.977: INFO: stderr: ""
+Jun 20 10:14:50.977: INFO: stdout: "pod/redis-master-psk77 patched\n"
+STEP: checking annotations
+Jun 20 10:14:50.980: INFO: Selector matched 1 pod for map[app:redis]
+Jun 20 10:14:50.980: INFO: ForEach: Found 1 pod from the filter.  Now looping through them.
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:14:50.980: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-2788" for this suite.
+Jun 20 10:15:12.997: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:15:13.066: INFO: namespace kubectl-2788 deletion completed in 22.082361891s
+
+• [SLOW TEST:26.673 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl patch
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should add annotations for pods in rc  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
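+(A minimal reproduction of the annotation patch exercised above, assuming kubectl points at the same cluster; the pod name and namespace are taken from this run and will differ elsewhere.)
+# Apply a strategic-merge patch that adds the annotation x=y:
+kubectl patch pod redis-master-psk77 --namespace=kubectl-2788 -p '{"metadata":{"annotations":{"x":"y"}}}'
+# Verify the annotation landed:
+kubectl get pod redis-master-psk77 --namespace=kubectl-2788 -o jsonpath='{.metadata.annotations.x}'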
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl api-versions 
+  should check if v1 is in available api versions  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:15:13.066: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[It] should check if v1 is in available api versions  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: validating api versions
+Jun 20 10:15:13.098: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 api-versions'
+Jun 20 10:15:13.181: INFO: stderr: ""
+Jun 20 10:15:13.181: INFO: stdout: "admissionregistration.k8s.io/v1beta1\napiextensions.k8s.io/v1beta1\napiregistration.k8s.io/v1\napiregistration.k8s.io/v1beta1\napps/v1\napps/v1beta1\napps/v1beta2\nauthentication.k8s.io/v1\nauthentication.k8s.io/v1beta1\nauthorization.k8s.io/v1\nauthorization.k8s.io/v1beta1\nautoscaling/v1\nautoscaling/v2beta1\nautoscaling/v2beta2\nbatch/v1\nbatch/v1beta1\ncertificates.k8s.io/v1beta1\ncoordination.k8s.io/v1\ncoordination.k8s.io/v1beta1\nevents.k8s.io/v1beta1\nextensions/v1beta1\nnetworking.k8s.io/v1\nnetworking.k8s.io/v1beta1\nnode.k8s.io/v1beta1\npolicy/v1beta1\nrbac.authorization.k8s.io/v1\nrbac.authorization.k8s.io/v1beta1\nscheduling.k8s.io/v1\nscheduling.k8s.io/v1beta1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:15:13.181: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-5499" for this suite.
+Jun 20 10:15:19.193: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:15:19.292: INFO: namespace kubectl-5499 deletion completed in 6.10756148s
+
+• [SLOW TEST:6.226 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl api-versions
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should check if v1 is in available api versions  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
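+(The check above boils down to grepping the discovery output; a minimal sketch, assuming the same kubeconfig.)
+# Assert that the core "v1" group/version is served by the API server:
+kubectl api-versions | grep -x 'v1' && echo 'v1 is available'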
+SSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:15:19.292: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename daemonsets
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:103
+[It] should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating simple DaemonSet "daemon-set"
+STEP: Check that daemon pods launch on every node of the cluster.
+Jun 20 10:15:19.377: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:19.377: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:19.377: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:19.381: INFO: Number of nodes with available pods: 0
+Jun 20 10:15:19.382: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:15:20.385: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:20.386: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:20.386: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:20.389: INFO: Number of nodes with available pods: 0
+Jun 20 10:15:20.389: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:15:21.385: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:21.385: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:21.385: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:21.388: INFO: Number of nodes with available pods: 1
+Jun 20 10:15:21.388: INFO: Node ip-10-100-12-226.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:15:22.385: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:22.386: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:22.386: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:22.389: INFO: Number of nodes with available pods: 2
+Jun 20 10:15:22.389: INFO: Number of running nodes: 2, number of available pods: 2
+STEP: Stop a daemon pod, check that the daemon pod is revived.
+Jun 20 10:15:22.404: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:22.404: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:22.405: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:22.407: INFO: Number of nodes with available pods: 1
+Jun 20 10:15:22.407: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:15:23.412: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:23.412: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:23.412: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:23.415: INFO: Number of nodes with available pods: 1
+Jun 20 10:15:23.415: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:15:24.411: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:24.411: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:24.411: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:24.413: INFO: Number of nodes with available pods: 1
+Jun 20 10:15:24.413: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:15:25.411: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:25.411: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:25.411: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:25.414: INFO: Number of nodes with available pods: 1
+Jun 20 10:15:25.414: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:15:26.423: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:26.424: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:26.424: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:26.427: INFO: Number of nodes with available pods: 1
+Jun 20 10:15:26.427: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:15:27.412: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:27.412: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:27.412: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:15:27.415: INFO: Number of nodes with available pods: 2
+Jun 20 10:15:27.415: INFO: Number of running nodes: 2, number of available pods: 2
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:69
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-9298, will wait for the garbage collector to delete the pods
+Jun 20 10:15:27.478: INFO: Deleting DaemonSet.extensions daemon-set took: 7.109033ms
+Jun 20 10:15:27.778: INFO: Terminating DaemonSet.extensions daemon-set pods took: 300.302289ms
+Jun 20 10:15:31.381: INFO: Number of nodes with available pods: 0
+Jun 20 10:15:31.381: INFO: Number of running nodes: 0, number of available pods: 0
+Jun 20 10:15:31.385: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-9298/daemonsets","resourceVersion":"10161"},"items":null}
+
+Jun 20 10:15:31.387: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-9298/pods","resourceVersion":"10161"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:15:31.402: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-9298" for this suite.
+Jun 20 10:15:37.416: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:15:37.505: INFO: namespace daemonsets-9298 deletion completed in 6.100564104s
+
+• [SLOW TEST:18.213 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
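+(A minimal sketch of the behaviour verified above: daemon pods land on every schedulable worker, skip tainted masters, and are revived after deletion. Names and image are placeholders.)
+cat <<'EOF' | kubectl apply -f -
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: daemon-set
+spec:
+  selector:
+    matchLabels: {app: daemon-set}
+  template:
+    metadata:
+      labels: {app: daemon-set}
+    spec:
+      containers:
+      - name: app
+        image: nginx   # placeholder image; no master toleration, so tainted masters are skipped
+EOF
+kubectl delete pod -l app=daemon-set --wait=false   # stop the daemon pods
+kubectl rollout status ds/daemon-set                # the controller revives them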
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:15:37.506: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the rc1
+STEP: create the rc2
+STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well
+STEP: delete the rc simpletest-rc-to-be-deleted
+STEP: wait for the rc to be deleted
+STEP: Gathering metrics
+W0620 10:15:47.637562      15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun 20 10:15:47.637: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:15:47.637: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-2120" for this suite.
+Jun 20 10:15:53.650: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:15:53.715: INFO: namespace gc-2120 deletion completed in 6.075096932s
+
+• [SLOW TEST:16.209 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
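+(The dependents that survive above keep a second, still-valid ownerReference; this is directly inspectable while the pods exist. The namespace below is from this run.)
+# List each pod together with the names of its owners:
+kubectl get pods -n gc-2120 -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.metadata.ownerReferences[*].name}{"\n"}{end}'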
+S
+------------------------------
+[sig-storage] ConfigMap 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:15:53.716: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name cm-test-opt-del-49d2b3a7-629d-426b-92f2-56c3316d54e8
+STEP: Creating configMap with name cm-test-opt-upd-c125856c-a4fe-4aeb-90f8-7e33d2f68ba0
+STEP: Creating the pod
+STEP: Deleting configmap cm-test-opt-del-49d2b3a7-629d-426b-92f2-56c3316d54e8
+STEP: Updating configmap cm-test-opt-upd-c125856c-a4fe-4aeb-90f8-7e33d2f68ba0
+STEP: Creating configMap with name cm-test-opt-create-13bf533e-9160-4687-8c1b-7cca857cacb5
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:17:12.159: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-425" for this suite.
+Jun 20 10:17:34.172: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:17:34.240: INFO: namespace configmap-425 deletion completed in 22.078025705s
+
+• [SLOW TEST:100.524 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
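+(The volumes in the test above are marked optional, so the pod runs even while a referenced ConfigMap is absent and picks the data up once it appears. A minimal sketch; names and image are placeholders.)
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cm-optional-demo
+spec:
+  containers:
+  - name: app
+    image: busybox
+    command: ["sh", "-c", "sleep 3600"]
+    volumeMounts:
+    - {name: cfg, mountPath: /etc/cfg}
+  volumes:
+  - name: cfg
+    configMap:
+      name: cm-not-created-yet   # may not exist; the mount stays empty until it does
+      optional: true
+EOF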
+SSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:17:34.240: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating secret with name secret-test-aaa0314d-8b60-461f-b86e-6cea11f2da90
+STEP: Creating a pod to test consume secrets
+Jun 20 10:17:34.286: INFO: Waiting up to 5m0s for pod "pod-secrets-1c1cc420-f30c-41e6-8085-d300614ab648" in namespace "secrets-5209" to be "success or failure"
+Jun 20 10:17:34.292: INFO: Pod "pod-secrets-1c1cc420-f30c-41e6-8085-d300614ab648": Phase="Pending", Reason="", readiness=false. Elapsed: 5.9239ms
+Jun 20 10:17:36.296: INFO: Pod "pod-secrets-1c1cc420-f30c-41e6-8085-d300614ab648": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009303618s
+STEP: Saw pod success
+Jun 20 10:17:36.296: INFO: Pod "pod-secrets-1c1cc420-f30c-41e6-8085-d300614ab648" satisfied condition "success or failure"
+Jun 20 10:17:36.299: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-secrets-1c1cc420-f30c-41e6-8085-d300614ab648 container secret-volume-test: 
+STEP: delete the pod
+Jun 20 10:17:36.321: INFO: Waiting for pod pod-secrets-1c1cc420-f30c-41e6-8085-d300614ab648 to disappear
+Jun 20 10:17:36.324: INFO: Pod pod-secrets-1c1cc420-f30c-41e6-8085-d300614ab648 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:17:36.324: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-5209" for this suite.
+Jun 20 10:17:42.336: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:17:42.403: INFO: namespace secrets-5209 deletion completed in 6.077001636s
+
+• [SLOW TEST:8.163 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
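+(Consuming one Secret through several mounts, as tested above, only needs multiple volume entries referencing the same secretName. A sketch with placeholder names.)
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: secret-two-mounts
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test
+    image: busybox
+    command: ["sh", "-c", "ls /etc/secret-1 /etc/secret-2"]
+    volumeMounts:
+    - {name: s1, mountPath: /etc/secret-1}
+    - {name: s2, mountPath: /etc/secret-2}
+  volumes:
+  - {name: s1, secret: {secretName: my-secret}}
+  - {name: s2, secret: {secretName: my-secret}}
+EOF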
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl version 
+  should check if all data is printed  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:17:42.404: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[It] should check if all data is printed  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 10:17:42.434: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 version'
+Jun 20 10:17:42.494: INFO: stderr: ""
+Jun 20 10:17:42.494: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"15\", GitVersion:\"v1.15.0\", GitCommit:\"e8462b5b5dc2584fdcd18e6bcfe9f1e4d970a529\", GitTreeState:\"clean\", BuildDate:\"2019-06-19T16:40:16Z\", GoVersion:\"go1.12.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nServer Version: version.Info{Major:\"1\", Minor:\"15\", GitVersion:\"v1.15.0\", GitCommit:\"e8462b5b5dc2584fdcd18e6bcfe9f1e4d970a529\", GitTreeState:\"clean\", BuildDate:\"2019-06-19T16:32:14Z\", GoVersion:\"go1.12.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:17:42.494: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-4959" for this suite.
+Jun 20 10:17:48.507: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:17:48.575: INFO: namespace kubectl-4959 deletion completed in 6.076827789s
+
+• [SLOW TEST:6.171 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl version
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should check if all data is printed  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
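+(The assertion above is that both halves of the version report are present; a quick equivalent check.)
+kubectl version | grep -c 'GitVersion'   # expect 2: one client line, one server line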
+SSSSSS
+------------------------------
+[sig-apps] ReplicationController 
+  should release no longer matching pods [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:17:48.576: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename replication-controller
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should release no longer matching pods [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Given a ReplicationController is created
+STEP: When the matched label of one of its pods change
+Jun 20 10:17:48.616: INFO: Pod name pod-release: Found 0 pods out of 1
+Jun 20 10:17:53.620: INFO: Pod name pod-release: Found 1 pod out of 1
+STEP: Then the pod is released
+[AfterEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:17:54.635: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replication-controller-9719" for this suite.
+Jun 20 10:18:00.647: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:18:00.711: INFO: namespace replication-controller-9719 deletion completed in 6.073984567s
+
+• [SLOW TEST:12.135 seconds]
+[sig-apps] ReplicationController
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should release no longer matching pods [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
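+(Releasing a pod, as above, is just relabelling it so the controller's selector no longer matches; the RC then creates a replacement. The pod name and label below are placeholders.)
+kubectl label pod pod-release-xxxxx name=released --overwrite   # pod leaves the RC
+kubectl get pods -l name=pod-release                            # a fresh replacement appears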
+SSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:18:00.712: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename sched-pred
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:81
+Jun 20 10:18:00.744: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+Jun 20 10:18:00.749: INFO: Waiting for terminating namespaces to be deleted...
+Jun 20 10:18:00.751: INFO: 
+Logging pods the kubelet thinks are on node ip-10-100-10-111.eu-west-1.compute.internal before test
+Jun 20 10:18:00.755: INFO: weave-net-nh2zg from kube-system started at 2019-06-20 09:35:33 +0000 UTC (2 container statuses recorded)
+Jun 20 10:18:00.755: INFO: 	Container weave ready: true, restart count 0
+Jun 20 10:18:00.755: INFO: 	Container weave-npc ready: true, restart count 0
+Jun 20 10:18:00.755: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-20 09:59:16 +0000 UTC (1 container statuses recorded)
+Jun 20 10:18:00.755: INFO: 	Container kube-sonobuoy ready: true, restart count 0
+Jun 20 10:18:00.755: INFO: kube-proxy-9j68g from kube-system started at 2019-06-20 09:12:47 +0000 UTC (1 container statuses recorded)
+Jun 20 10:18:00.755: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 20 10:18:00.755: INFO: sonobuoy-systemd-logs-daemon-set-de69c0149d564b9d-fz4v7 from heptio-sonobuoy started at 2019-06-20 09:59:22 +0000 UTC (2 container statuses recorded)
+Jun 20 10:18:00.755: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun 20 10:18:00.755: INFO: 	Container systemd-logs ready: true, restart count 0
+Jun 20 10:18:00.755: INFO: 
+Logging pods the kubelet thinks are on node ip-10-100-12-226.eu-west-1.compute.internal before test
+Jun 20 10:18:00.761: INFO: kube-proxy-4c7sq from kube-system started at 2019-06-20 09:12:47 +0000 UTC (1 container statuses recorded)
+Jun 20 10:18:00.761: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 20 10:18:00.761: INFO: weave-net-9x9dh from kube-system started at 2019-06-20 09:35:33 +0000 UTC (2 container statuses recorded)
+Jun 20 10:18:00.761: INFO: 	Container weave ready: true, restart count 0
+Jun 20 10:18:00.761: INFO: 	Container weave-npc ready: true, restart count 0
+Jun 20 10:18:00.761: INFO: sonobuoy-e2e-job-dca131905d74464b from heptio-sonobuoy started at 2019-06-20 09:59:22 +0000 UTC (2 container statuses recorded)
+Jun 20 10:18:00.761: INFO: 	Container e2e ready: true, restart count 0
+Jun 20 10:18:00.761: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun 20 10:18:00.761: INFO: sonobuoy-systemd-logs-daemon-set-de69c0149d564b9d-m4zhg from heptio-sonobuoy started at 2019-06-20 09:59:22 +0000 UTC (2 container statuses recorded)
+Jun 20 10:18:00.761: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun 20 10:18:00.761: INFO: 	Container systemd-logs ready: true, restart count 0
+[It] validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Trying to schedule Pod with nonempty NodeSelector.
+STEP: Considering event: 
+Type = [Warning], Name = [restricted-pod.15a9e0c495279b00], Reason = [FailedScheduling], Message = [0/5 nodes are available: 5 node(s) didn't match node selector.]
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:18:01.785: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "sched-pred-3199" for this suite.
+Jun 20 10:18:07.799: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:18:07.880: INFO: namespace sched-pred-3199 deletion completed in 6.091780944s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:72
+
+• [SLOW TEST:7.169 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:23
+  validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
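+(A pod whose nodeSelector matches no node stays Pending with FailedScheduling events, exactly as recorded above. A minimal sketch; name, image, and label are placeholders.)
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: restricted-pod
+spec:
+  nodeSelector:
+    no-such-label: "42"
+  containers:
+  - {name: app, image: nginx}
+EOF
+kubectl get events --field-selector reason=FailedScheduling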
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Watchers 
+  should receive events on concurrent watches in same order [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:18:07.880: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename watch
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should receive events on concurrent watches in same order [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: starting a background goroutine to produce watch events
+STEP: creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:18:13.447: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-2159" for this suite.
+Jun 20 10:18:19.588: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:18:19.668: INFO: namespace watch-2159 deletion completed in 6.182228785s
+
+• [SLOW TEST:11.787 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should receive events on concurrent watches in same order [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
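+(A rough CLI analogue of the ordering property checked above, assuming permission to create ConfigMaps: concurrent watchers over the same resources report the same events in the same order.)
+kubectl get configmaps --watch &   # watcher 1
+kubectl get configmaps --watch &   # watcher 2
+for i in 1 2 3; do kubectl create configmap "watch-demo-$i"; done
+sleep 5; kill %1 %2                # both watchers printed the creates in identical order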
+SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  Burst scaling should run to completion even with unhealthy pods [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:18:19.668: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename statefulset
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:60
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:75
+STEP: Creating service test in namespace statefulset-3361
+[It] Burst scaling should run to completion even with unhealthy pods [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating stateful set ss in namespace statefulset-3361
+STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-3361
+Jun 20 10:18:19.714: INFO: Found 0 stateful pods, waiting for 1
+Jun 20 10:18:29.718: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod
+Jun 20 10:18:29.721: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-3361 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 20 10:18:29.882: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun 20 10:18:29.882: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 20 10:18:29.882: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun 20 10:18:29.885: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true
+Jun 20 10:18:39.889: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+Jun 20 10:18:39.889: INFO: Waiting for statefulset status.replicas updated to 0
+Jun 20 10:18:39.900: INFO: POD   NODE                                         PHASE    GRACE  CONDITIONS
+Jun 20 10:18:39.900: INFO: ss-0  ip-10-100-10-111.eu-west-1.compute.internal  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:19 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:29 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:29 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:19 +0000 UTC  }]
+Jun 20 10:18:39.900: INFO: 
+Jun 20 10:18:39.900: INFO: StatefulSet ss has not reached scale 3, at 1
+Jun 20 10:18:40.903: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.997211592s
+Jun 20 10:18:41.907: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.993726079s
+Jun 20 10:18:42.910: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.990139935s
+Jun 20 10:18:43.914: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.986627223s
+Jun 20 10:18:44.918: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.982960509s
+Jun 20 10:18:45.922: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.979159238s
+Jun 20 10:18:46.925: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.975352035s
+Jun 20 10:18:47.929: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.971497772s
+Jun 20 10:18:48.933: INFO: Verifying statefulset ss doesn't scale past 3 for another 967.529598ms
+STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-3361
+Jun 20 10:18:49.937: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-3361 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun 20 10:18:50.105: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun 20 10:18:50.105: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun 20 10:18:50.105: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun 20 10:18:50.105: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-3361 ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun 20 10:18:50.260: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n"
+Jun 20 10:18:50.260: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun 20 10:18:50.260: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun 20 10:18:50.260: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-3361 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun 20 10:18:50.420: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n"
+Jun 20 10:18:50.420: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun 20 10:18:50.420: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-2: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun 20 10:18:50.423: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
+Jun 20 10:19:00.427: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+Jun 20 10:19:00.427: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true
+Jun 20 10:19:00.427: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Scale down will not halt with unhealthy stateful pod
+Jun 20 10:19:00.430: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-3361 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 20 10:19:00.610: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun 20 10:19:00.610: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 20 10:19:00.610: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun 20 10:19:00.610: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-3361 ss-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 20 10:19:00.766: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun 20 10:19:00.766: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 20 10:19:00.766: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun 20 10:19:00.766: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-3361 ss-2 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 20 10:19:00.928: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun 20 10:19:00.928: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 20 10:19:00.928: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun 20 10:19:00.929: INFO: Waiting for statefulset status.replicas updated to 0
+Jun 20 10:19:00.931: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 3
+Jun 20 10:19:10.937: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+Jun 20 10:19:10.937: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false
+Jun 20 10:19:10.937: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false
+Jun 20 10:19:10.947: INFO: POD   NODE                                         PHASE    GRACE  CONDITIONS
+Jun 20 10:19:10.947: INFO: ss-0  ip-10-100-10-111.eu-west-1.compute.internal  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:19 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:00 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:00 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:19 +0000 UTC  }]
+Jun 20 10:19:10.947: INFO: ss-1  ip-10-100-12-226.eu-west-1.compute.internal  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  }]
+Jun 20 10:19:10.947: INFO: ss-2  ip-10-100-12-226.eu-west-1.compute.internal  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  }]
+Jun 20 10:19:10.947: INFO: 
+Jun 20 10:19:10.947: INFO: StatefulSet ss has not reached scale 0, at 3
+Jun 20 10:19:11.951: INFO: POD   NODE                                         PHASE    GRACE  CONDITIONS
+Jun 20 10:19:11.951: INFO: ss-0  ip-10-100-10-111.eu-west-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:19 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:00 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:00 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:19 +0000 UTC  }]
+Jun 20 10:19:11.951: INFO: ss-1  ip-10-100-12-226.eu-west-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  }]
+Jun 20 10:19:11.951: INFO: ss-2  ip-10-100-12-226.eu-west-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  }]
+Jun 20 10:19:11.951: INFO: 
+Jun 20 10:19:11.951: INFO: StatefulSet ss has not reached scale 0, at 3
+Jun 20 10:19:12.954: INFO: POD   NODE                                         PHASE    GRACE  CONDITIONS
+Jun 20 10:19:12.954: INFO: ss-0  ip-10-100-10-111.eu-west-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:19 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:00 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:00 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:19 +0000 UTC  }]
+Jun 20 10:19:12.955: INFO: ss-1  ip-10-100-12-226.eu-west-1.compute.internal  Pending  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  }]
+Jun 20 10:19:12.955: INFO: ss-2  ip-10-100-12-226.eu-west-1.compute.internal  Pending  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:19:01 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:18:39 +0000 UTC  }]
+Jun 20 10:19:12.955: INFO: 
+Jun 20 10:19:12.955: INFO: StatefulSet ss has not reached scale 0, at 3
+Jun 20 10:19:13.958: INFO: Verifying statefulset ss doesn't scale past 0 for another 6.988807008s
+Jun 20 10:19:14.961: INFO: Verifying statefulset ss doesn't scale past 0 for another 5.985158323s
+Jun 20 10:19:15.964: INFO: Verifying statefulset ss doesn't scale past 0 for another 4.982206967s
+Jun 20 10:19:16.967: INFO: Verifying statefulset ss doesn't scale past 0 for another 3.979017781s
+Jun 20 10:19:17.970: INFO: Verifying statefulset ss doesn't scale past 0 for another 2.976036719s
+Jun 20 10:19:18.973: INFO: Verifying statefulset ss doesn't scale past 0 for another 1.972948895s
+Jun 20 10:19:19.977: INFO: Verifying statefulset ss doesn't scale past 0 for another 969.837019ms
+STEP: Scaling down stateful set ss to 0 replicas and waiting until none of the pods are running in namespace statefulset-3361
+Jun 20 10:19:20.980: INFO: Scaling statefulset ss to 0
+Jun 20 10:19:20.987: INFO: Waiting for statefulset status.replicas updated to 0
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:86
+Jun 20 10:19:20.989: INFO: Deleting all statefulset in ns statefulset-3361
+Jun 20 10:19:20.991: INFO: Scaling statefulset ss to 0
+Jun 20 10:19:21.000: INFO: Waiting for statefulset status.replicas updated to 0
+Jun 20 10:19:21.002: INFO: Deleting statefulset ss
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:19:21.012: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-3361" for this suite.
+Jun 20 10:19:27.027: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:19:27.094: INFO: namespace statefulset-3361 deletion completed in 6.078641583s
+
+• [SLOW TEST:67.426 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    Burst scaling should run to completion even with unhealthy pods [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
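+(Burst scaling, as exercised above, relies on podManagementPolicy: Parallel, so scale-up and scale-down do not wait on per-pod readiness. A sketch of the relevant spec; names and image are placeholders.)
+cat <<'EOF' | kubectl apply -f -
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: ss
+spec:
+  serviceName: test
+  podManagementPolicy: Parallel   # start/stop replicas in parallel, ready or not
+  replicas: 1
+  selector:
+    matchLabels: {app: ss}
+  template:
+    metadata:
+      labels: {app: ss}
+    spec:
+      containers:
+      - {name: nginx, image: nginx}
+EOF
+kubectl scale statefulset ss --replicas=3   # new pods start without ordered waits
+kubectl scale statefulset ss --replicas=0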
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:19:27.094: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating projection with secret that has name projected-secret-test-86fa74a2-430b-4b99-aef8-727f8402e0b0
+STEP: Creating a pod to test consume secrets
+Jun 20 10:19:27.139: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-83ac0c0a-ede7-4762-8432-484c94374d3e" in namespace "projected-2834" to be "success or failure"
+Jun 20 10:19:27.145: INFO: Pod "pod-projected-secrets-83ac0c0a-ede7-4762-8432-484c94374d3e": Phase="Pending", Reason="", readiness=false. Elapsed: 5.955133ms
+Jun 20 10:19:29.148: INFO: Pod "pod-projected-secrets-83ac0c0a-ede7-4762-8432-484c94374d3e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009486733s
+Jun 20 10:19:31.152: INFO: Pod "pod-projected-secrets-83ac0c0a-ede7-4762-8432-484c94374d3e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012784381s
+STEP: Saw pod success
+Jun 20 10:19:31.152: INFO: Pod "pod-projected-secrets-83ac0c0a-ede7-4762-8432-484c94374d3e" satisfied condition "success or failure"
+Jun 20 10:19:31.154: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-secrets-83ac0c0a-ede7-4762-8432-484c94374d3e container projected-secret-volume-test: 
+STEP: delete the pod
+Jun 20 10:19:31.177: INFO: Waiting for pod pod-projected-secrets-83ac0c0a-ede7-4762-8432-484c94374d3e to disappear
+Jun 20 10:19:31.184: INFO: Pod pod-projected-secrets-83ac0c0a-ede7-4762-8432-484c94374d3e no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:19:31.184: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-2834" for this suite.
+Jun 20 10:19:37.197: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:19:37.266: INFO: namespace projected-2834 deletion completed in 6.079487034s
+
+• [SLOW TEST:10.172 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
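+A pod equivalent to the one this test creates can be sketched as follows; the secret name is taken from the log, while the pod name, image, mount path, and the data-1 key are illustrative assumptions:
+
+# Minimal sketch: consume a secret through a projected volume and print one key.
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-projected-secrets-example
+spec:
+  restartPolicy: Never
+  volumes:
+  - name: projected-secret-volume
+    projected:
+      sources:
+      - secret:
+          name: projected-secret-test-86fa74a2-430b-4b99-aef8-727f8402e0b0
+  containers:
+  - name: projected-secret-volume-test
+    image: busybox
+    command: ["cat", "/etc/projected-secret-volume/data-1"]   # the data-1 key is an assumption
+    volumeMounts:
+    - name: projected-secret-volume
+      mountPath: /etc/projected-secret-volume
+EOF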
+SSS
+------------------------------
+[sig-network] DNS 
+  should provide DNS for services  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] DNS
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:19:37.266: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename dns
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide DNS for services  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a test headless service
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-7076.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-7076.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-7076.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-7076.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-7076.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-7076.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-7076.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-7076.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-7076.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-7076.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 79.63.96.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.96.63.79_udp@PTR;check="$$(dig +tcp +noall +answer +search 79.63.96.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.96.63.79_tcp@PTR;sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-7076.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-7076.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-7076.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-7076.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-7076.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-7076.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-7076.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-7076.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-7076.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-7076.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-7076.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 79.63.96.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.96.63.79_udp@PTR;check="$$(dig +tcp +noall +answer +search 79.63.96.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.96.63.79_tcp@PTR;sleep 1; done
+
+STEP: creating a pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Jun 20 10:19:41.343: INFO: Unable to read wheezy_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:41.346: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:41.350: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:41.354: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:41.390: INFO: Unable to read jessie_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:41.398: INFO: Unable to read jessie_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:41.413: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:41.417: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:41.439: INFO: Lookups using dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51 failed for: [wheezy_udp@dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_udp@dns-test-service.dns-7076.svc.cluster.local jessie_tcp@dns-test-service.dns-7076.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local]
+
+Jun 20 10:19:46.443: INFO: Unable to read wheezy_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:46.447: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:46.451: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:46.454: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:46.475: INFO: Unable to read jessie_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:46.481: INFO: Unable to read jessie_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:46.484: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:46.487: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:46.506: INFO: Lookups using dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51 failed for: [wheezy_udp@dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_udp@dns-test-service.dns-7076.svc.cluster.local jessie_tcp@dns-test-service.dns-7076.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local]
+
+Jun 20 10:19:51.445: INFO: Unable to read wheezy_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:51.451: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:51.456: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:51.460: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:51.503: INFO: Unable to read jessie_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:51.506: INFO: Unable to read jessie_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:51.509: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:51.513: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:51.534: INFO: Lookups using dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51 failed for: [wheezy_udp@dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_udp@dns-test-service.dns-7076.svc.cluster.local jessie_tcp@dns-test-service.dns-7076.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local]
+
+Jun 20 10:19:56.446: INFO: Unable to read wheezy_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:56.451: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:56.455: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:56.459: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:56.496: INFO: Unable to read jessie_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:56.504: INFO: Unable to read jessie_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:56.512: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:56.516: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:19:56.537: INFO: Lookups using dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51 failed for: [wheezy_udp@dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_udp@dns-test-service.dns-7076.svc.cluster.local jessie_tcp@dns-test-service.dns-7076.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local]
+
+Jun 20 10:20:01.445: INFO: Unable to read wheezy_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:01.449: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:01.454: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:01.458: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:01.496: INFO: Unable to read jessie_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:01.501: INFO: Unable to read jessie_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:01.506: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:01.510: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:01.574: INFO: Lookups using dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51 failed for: [wheezy_udp@dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_udp@dns-test-service.dns-7076.svc.cluster.local jessie_tcp@dns-test-service.dns-7076.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local]
+
+Jun 20 10:20:06.444: INFO: Unable to read wheezy_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:06.448: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:06.451: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:06.454: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:06.479: INFO: Unable to read jessie_udp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:06.482: INFO: Unable to read jessie_tcp@dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:06.485: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:06.489: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local from pod dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51: the server could not find the requested resource (get pods dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51)
+Jun 20 10:20:06.509: INFO: Lookups using dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51 failed for: [wheezy_udp@dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@dns-test-service.dns-7076.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_udp@dns-test-service.dns-7076.svc.cluster.local jessie_tcp@dns-test-service.dns-7076.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7076.svc.cluster.local]
+
+Jun 20 10:20:11.559: INFO: DNS probes using dns-7076/dns-test-b0f749fc-1fd7-4ca6-99d2-c98aad381c51 succeeded
+
+STEP: deleting the pod
+STEP: deleting the test service
+STEP: deleting the test headless service
+[AfterEach] [sig-network] DNS
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:20:11.612: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "dns-7076" for this suite.
+Jun 20 10:20:17.632: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:20:17.718: INFO: namespace dns-7076 deletion completed in 6.101093353s
+
+• [SLOW TEST:40.452 seconds]
+[sig-network] DNS
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
+  should provide DNS for services  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
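+Any one of the probes above can be run by hand while the headless service still exists; a minimal sketch, with the service and namespace names taken from the log and the dnsutils image an assumption:
+
+# Resolve the test service's A record over UDP from a throwaway pod,
+# the same dig invocation the wheezy/jessie probers loop over above.
+kubectl run -it --rm dns-probe --image=tutum/dnsutils --restart=Never -- \
+  dig +notcp +noall +answer +search dns-test-service.dns-7076.svc.cluster.local A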
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates that NodeSelector is respected if matching  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:20:17.719: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename sched-pred
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:81
+Jun 20 10:20:17.754: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+Jun 20 10:20:17.761: INFO: Waiting for terminating namespaces to be deleted...
+Jun 20 10:20:17.763: INFO: 
+Logging pods the kubelet thinks are on node ip-10-100-10-111.eu-west-1.compute.internal before test
+Jun 20 10:20:17.768: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-20 09:59:16 +0000 UTC (1 container status recorded)
+Jun 20 10:20:17.768: INFO: 	Container kube-sonobuoy ready: true, restart count 0
+Jun 20 10:20:17.768: INFO: weave-net-nh2zg from kube-system started at 2019-06-20 09:35:33 +0000 UTC (2 container statuses recorded)
+Jun 20 10:20:17.768: INFO: 	Container weave ready: true, restart count 0
+Jun 20 10:20:17.768: INFO: 	Container weave-npc ready: true, restart count 0
+Jun 20 10:20:17.768: INFO: sonobuoy-systemd-logs-daemon-set-de69c0149d564b9d-fz4v7 from heptio-sonobuoy started at 2019-06-20 09:59:22 +0000 UTC (2 container statuses recorded)
+Jun 20 10:20:17.768: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun 20 10:20:17.768: INFO: 	Container systemd-logs ready: true, restart count 0
+Jun 20 10:20:17.768: INFO: kube-proxy-9j68g from kube-system started at 2019-06-20 09:12:47 +0000 UTC (1 container status recorded)
+Jun 20 10:20:17.768: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 20 10:20:17.768: INFO: 
+Logging pods the kubelet thinks are on node ip-10-100-12-226.eu-west-1.compute.internal before test
+Jun 20 10:20:17.772: INFO: sonobuoy-systemd-logs-daemon-set-de69c0149d564b9d-m4zhg from heptio-sonobuoy started at 2019-06-20 09:59:22 +0000 UTC (2 container statuses recorded)
+Jun 20 10:20:17.772: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun 20 10:20:17.772: INFO: 	Container systemd-logs ready: true, restart count 0
+Jun 20 10:20:17.772: INFO: kube-proxy-4c7sq from kube-system started at 2019-06-20 09:12:47 +0000 UTC (1 container status recorded)
+Jun 20 10:20:17.772: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun 20 10:20:17.772: INFO: weave-net-9x9dh from kube-system started at 2019-06-20 09:35:33 +0000 UTC (2 container statuses recorded)
+Jun 20 10:20:17.772: INFO: 	Container weave ready: true, restart count 0
+Jun 20 10:20:17.772: INFO: 	Container weave-npc ready: true, restart count 0
+Jun 20 10:20:17.772: INFO: sonobuoy-e2e-job-dca131905d74464b from heptio-sonobuoy started at 2019-06-20 09:59:22 +0000 UTC (2 container statuses recorded)
+Jun 20 10:20:17.772: INFO: 	Container e2e ready: true, restart count 0
+Jun 20 10:20:17.772: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+[It] validates that NodeSelector is respected if matching  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Trying to launch a pod without a label to get a node which can launch it.
+STEP: Explicitly delete pod here to free the resource it takes.
+STEP: Trying to apply a random label on the found node.
+STEP: verifying the node has the label kubernetes.io/e2e-f4729135-93f9-4277-b6d5-7c66a8d715dd 42
+STEP: Trying to relaunch the pod, now with labels.
+STEP: removing the label kubernetes.io/e2e-f4729135-93f9-4277-b6d5-7c66a8d715dd off the node ip-10-100-10-111.eu-west-1.compute.internal
+STEP: verifying the node doesn't have the label kubernetes.io/e2e-f4729135-93f9-4277-b6d5-7c66a8d715dd
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:20:23.844: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "sched-pred-8548" for this suite.
+Jun 20 10:20:41.857: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:20:41.931: INFO: namespace sched-pred-8548 deletion completed in 18.083540523s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:72
+
+• [SLOW TEST:24.212 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:23
+  validates that NodeSelector is respected if matching  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
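+The matching-NodeSelector case validated above boils down to labeling a node and scheduling a pod that selects that label; a minimal sketch using the node and label from the log (the pod name and pause image are assumptions):
+
+# Label the node, schedule a pod onto it via nodeSelector, then remove the label.
+kubectl label node ip-10-100-10-111.eu-west-1.compute.internal \
+  kubernetes.io/e2e-f4729135-93f9-4277-b6d5-7c66a8d715dd=42
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: with-labels
+spec:
+  nodeSelector:
+    kubernetes.io/e2e-f4729135-93f9-4277-b6d5-7c66a8d715dd: "42"
+  containers:
+  - name: pause
+    image: k8s.gcr.io/pause:3.1
+EOF
+kubectl label node ip-10-100-10-111.eu-west-1.compute.internal \
+  kubernetes.io/e2e-f4729135-93f9-4277-b6d5-7c66a8d715dd-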
+SSSSSSSSSSSSSS
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:20:41.931: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename init-container
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:44
+[It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating the pod
+Jun 20 10:20:41.977: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:20:44.852: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "init-container-1142" for this suite.
+Jun 20 10:20:50.867: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:20:50.935: INFO: namespace init-container-1142 deletion completed in 6.078286818s
+
+• [SLOW TEST:9.004 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
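+The behavior asserted above (a failing init container on a RestartNever pod blocks the app container and fails the pod) can be sketched with a minimal manifest; the names and images are illustrative assumptions:
+
+# The init container exits non-zero, so the app container never starts
+# and the pod ends up failed rather than restarting.
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: init-fail-example
+spec:
+  restartPolicy: Never
+  initContainers:
+  - name: init-fail
+    image: busybox
+    command: ["sh", "-c", "exit 1"]
+  containers:
+  - name: app
+    image: busybox
+    command: ["sh", "-c", "echo should never run"]
+EOF
+kubectl get pod init-fail-example   # expect STATUS Init:Error; the app container stays unstarted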
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Service endpoints latency 
+  should not be very high  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] Service endpoints latency
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:20:50.936: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename svc-latency
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not be very high  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating replication controller svc-latency-rc in namespace svc-latency-1427
+I0620 10:20:50.976862      15 runners.go:180] Created replication controller with name: svc-latency-rc, namespace: svc-latency-1427, replica count: 1
+I0620 10:20:52.027423      15 runners.go:180] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0620 10:20:53.027615      15 runners.go:180] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+Jun 20 10:20:53.136: INFO: Created: latency-svc-rmkqb
+Jun 20 10:20:53.143: INFO: Got endpoints: latency-svc-rmkqb [15.300136ms]
+Jun 20 10:20:53.156: INFO: Created: latency-svc-2bj7j
+Jun 20 10:20:53.163: INFO: Created: latency-svc-88qnp
+Jun 20 10:20:53.165: INFO: Got endpoints: latency-svc-2bj7j [22.45548ms]
+Jun 20 10:20:53.170: INFO: Got endpoints: latency-svc-88qnp [26.917421ms]
+Jun 20 10:20:53.181: INFO: Created: latency-svc-fzzfs
+Jun 20 10:20:53.181: INFO: Created: latency-svc-z2wb2
+Jun 20 10:20:53.186: INFO: Got endpoints: latency-svc-z2wb2 [42.855205ms]
+Jun 20 10:20:53.188: INFO: Got endpoints: latency-svc-fzzfs [44.792584ms]
+Jun 20 10:20:53.192: INFO: Created: latency-svc-lwzfx
+Jun 20 10:20:53.197: INFO: Got endpoints: latency-svc-lwzfx [54.214207ms]
+Jun 20 10:20:53.198: INFO: Created: latency-svc-kb9xd
+Jun 20 10:20:53.203: INFO: Got endpoints: latency-svc-kb9xd [60.066355ms]
+Jun 20 10:20:53.204: INFO: Created: latency-svc-dm499
+Jun 20 10:20:53.209: INFO: Got endpoints: latency-svc-dm499 [65.886369ms]
+Jun 20 10:20:53.212: INFO: Created: latency-svc-jkt66
+Jun 20 10:20:53.218: INFO: Created: latency-svc-zqshc
+Jun 20 10:20:53.218: INFO: Got endpoints: latency-svc-jkt66 [75.015129ms]
+Jun 20 10:20:53.223: INFO: Got endpoints: latency-svc-zqshc [80.113216ms]
+Jun 20 10:20:53.227: INFO: Created: latency-svc-2gg7s
+Jun 20 10:20:53.232: INFO: Got endpoints: latency-svc-2gg7s [88.727693ms]
+Jun 20 10:20:53.234: INFO: Created: latency-svc-qnj4n
+Jun 20 10:20:53.240: INFO: Got endpoints: latency-svc-qnj4n [96.298597ms]
+Jun 20 10:20:53.240: INFO: Created: latency-svc-2pxjl
+Jun 20 10:20:53.246: INFO: Got endpoints: latency-svc-2pxjl [102.956323ms]
+Jun 20 10:20:53.247: INFO: Created: latency-svc-76prp
+Jun 20 10:20:53.255: INFO: Created: latency-svc-kcfjw
+Jun 20 10:20:53.256: INFO: Got endpoints: latency-svc-76prp [112.281446ms]
+Jun 20 10:20:53.261: INFO: Got endpoints: latency-svc-kcfjw [117.394251ms]
+Jun 20 10:20:53.264: INFO: Created: latency-svc-ztvst
+Jun 20 10:20:53.269: INFO: Got endpoints: latency-svc-ztvst [125.839388ms]
+Jun 20 10:20:53.273: INFO: Created: latency-svc-znvsq
+Jun 20 10:20:53.277: INFO: Created: latency-svc-qxssk
+Jun 20 10:20:53.278: INFO: Got endpoints: latency-svc-znvsq [112.972742ms]
+Jun 20 10:20:53.283: INFO: Created: latency-svc-4w7wt
+Jun 20 10:20:53.284: INFO: Got endpoints: latency-svc-qxssk [114.640906ms]
+Jun 20 10:20:53.288: INFO: Got endpoints: latency-svc-4w7wt [102.125504ms]
+Jun 20 10:20:53.291: INFO: Created: latency-svc-cqjtt
+Jun 20 10:20:53.296: INFO: Got endpoints: latency-svc-cqjtt [108.500119ms]
+Jun 20 10:20:53.299: INFO: Created: latency-svc-qcrrg
+Jun 20 10:20:53.305: INFO: Got endpoints: latency-svc-qcrrg [108.155854ms]
+Jun 20 10:20:53.306: INFO: Created: latency-svc-2tm8b
+Jun 20 10:20:53.311: INFO: Got endpoints: latency-svc-2tm8b [107.500681ms]
+Jun 20 10:20:53.314: INFO: Created: latency-svc-8bptd
+Jun 20 10:20:53.320: INFO: Got endpoints: latency-svc-8bptd [110.874315ms]
+Jun 20 10:20:53.324: INFO: Created: latency-svc-7vbxz
+Jun 20 10:20:53.328: INFO: Got endpoints: latency-svc-7vbxz [110.231331ms]
+Jun 20 10:20:53.332: INFO: Created: latency-svc-92d27
+Jun 20 10:20:53.339: INFO: Got endpoints: latency-svc-92d27 [115.510166ms]
+Jun 20 10:20:53.343: INFO: Created: latency-svc-xcpv2
+Jun 20 10:20:53.346: INFO: Got endpoints: latency-svc-xcpv2 [114.321549ms]
+Jun 20 10:20:53.350: INFO: Created: latency-svc-gh9hj
+Jun 20 10:20:53.358: INFO: Got endpoints: latency-svc-gh9hj [117.800536ms]
+Jun 20 10:20:53.362: INFO: Created: latency-svc-cpdtm
+Jun 20 10:20:53.369: INFO: Created: latency-svc-hxwk7
+Jun 20 10:20:53.369: INFO: Got endpoints: latency-svc-cpdtm [122.710844ms]
+Jun 20 10:20:53.373: INFO: Got endpoints: latency-svc-hxwk7 [117.502731ms]
+Jun 20 10:20:53.377: INFO: Created: latency-svc-h8sk4
+Jun 20 10:20:53.381: INFO: Got endpoints: latency-svc-h8sk4 [120.53771ms]
+Jun 20 10:20:53.384: INFO: Created: latency-svc-jrfz9
+Jun 20 10:20:53.389: INFO: Got endpoints: latency-svc-jrfz9 [119.275913ms]
+Jun 20 10:20:53.391: INFO: Created: latency-svc-2pm78
+Jun 20 10:20:53.398: INFO: Got endpoints: latency-svc-2pm78 [119.629457ms]
+Jun 20 10:20:53.402: INFO: Created: latency-svc-wg6g6
+Jun 20 10:20:53.406: INFO: Got endpoints: latency-svc-wg6g6 [121.814586ms]
+Jun 20 10:20:53.411: INFO: Created: latency-svc-nllzl
+Jun 20 10:20:53.414: INFO: Got endpoints: latency-svc-nllzl [126.473049ms]
+Jun 20 10:20:53.416: INFO: Created: latency-svc-fd6vb
+Jun 20 10:20:53.423: INFO: Got endpoints: latency-svc-fd6vb [126.384799ms]
+Jun 20 10:20:53.424: INFO: Created: latency-svc-c6qpg
+Jun 20 10:20:53.430: INFO: Created: latency-svc-n8msg
+Jun 20 10:20:53.438: INFO: Created: latency-svc-r2k4c
+Jun 20 10:20:53.446: INFO: Created: latency-svc-zk7rg
+Jun 20 10:20:53.456: INFO: Got endpoints: latency-svc-c6qpg [150.001932ms]
+Jun 20 10:20:53.458: INFO: Created: latency-svc-dvdsc
+Jun 20 10:20:53.465: INFO: Created: latency-svc-p5tmb
+Jun 20 10:20:53.471: INFO: Created: latency-svc-dm4c2
+Jun 20 10:20:53.478: INFO: Created: latency-svc-7n9tv
+Jun 20 10:20:53.486: INFO: Created: latency-svc-vbscd
+Jun 20 10:20:53.494: INFO: Got endpoints: latency-svc-n8msg [183.72381ms]
+Jun 20 10:20:53.500: INFO: Created: latency-svc-g46t6
+Jun 20 10:20:53.501: INFO: Created: latency-svc-lsvvn
+Jun 20 10:20:53.513: INFO: Created: latency-svc-t78hm
+Jun 20 10:20:53.523: INFO: Created: latency-svc-p8tmg
+Jun 20 10:20:53.529: INFO: Created: latency-svc-k5swc
+Jun 20 10:20:53.541: INFO: Created: latency-svc-lt6vx
+Jun 20 10:20:53.543: INFO: Got endpoints: latency-svc-zk7rg [214.512832ms]
+Jun 20 10:20:53.546: INFO: Created: latency-svc-45bct
+Jun 20 10:20:53.555: INFO: Created: latency-svc-l65wt
+Jun 20 10:20:53.566: INFO: Created: latency-svc-w8k97
+Jun 20 10:20:53.593: INFO: Got endpoints: latency-svc-r2k4c [272.925031ms]
+Jun 20 10:20:53.603: INFO: Created: latency-svc-sgv52
+Jun 20 10:20:53.642: INFO: Got endpoints: latency-svc-dvdsc [303.121244ms]
+Jun 20 10:20:53.652: INFO: Created: latency-svc-5mx2n
+Jun 20 10:20:53.692: INFO: Got endpoints: latency-svc-p5tmb [345.614161ms]
+Jun 20 10:20:53.703: INFO: Created: latency-svc-fbgvh
+Jun 20 10:20:53.745: INFO: Got endpoints: latency-svc-dm4c2 [387.568991ms]
+Jun 20 10:20:53.755: INFO: Created: latency-svc-6wcqm
+Jun 20 10:20:53.792: INFO: Got endpoints: latency-svc-7n9tv [422.8379ms]
+Jun 20 10:20:53.801: INFO: Created: latency-svc-5dgr2
+Jun 20 10:20:53.842: INFO: Got endpoints: latency-svc-vbscd [468.495873ms]
+Jun 20 10:20:53.852: INFO: Created: latency-svc-hv94m
+Jun 20 10:20:53.892: INFO: Got endpoints: latency-svc-g46t6 [510.183126ms]
+Jun 20 10:20:53.902: INFO: Created: latency-svc-vsmwc
+Jun 20 10:20:53.941: INFO: Got endpoints: latency-svc-lsvvn [552.509567ms]
+Jun 20 10:20:53.951: INFO: Created: latency-svc-vdkhp
+Jun 20 10:20:53.992: INFO: Got endpoints: latency-svc-t78hm [593.911051ms]
+Jun 20 10:20:54.002: INFO: Created: latency-svc-s4m7j
+Jun 20 10:20:54.043: INFO: Got endpoints: latency-svc-p8tmg [636.863956ms]
+Jun 20 10:20:54.054: INFO: Created: latency-svc-tgtqp
+Jun 20 10:20:54.092: INFO: Got endpoints: latency-svc-k5swc [677.517669ms]
+Jun 20 10:20:54.103: INFO: Created: latency-svc-r6g4l
+Jun 20 10:20:54.142: INFO: Got endpoints: latency-svc-lt6vx [719.196885ms]
+Jun 20 10:20:54.177: INFO: Created: latency-svc-vk9m9
+Jun 20 10:20:54.192: INFO: Got endpoints: latency-svc-45bct [736.452276ms]
+Jun 20 10:20:54.204: INFO: Created: latency-svc-4x97d
+Jun 20 10:20:54.242: INFO: Got endpoints: latency-svc-l65wt [747.404589ms]
+Jun 20 10:20:54.253: INFO: Created: latency-svc-ndhtc
+Jun 20 10:20:54.294: INFO: Got endpoints: latency-svc-w8k97 [750.264694ms]
+Jun 20 10:20:54.305: INFO: Created: latency-svc-zg2h8
+Jun 20 10:20:54.343: INFO: Got endpoints: latency-svc-sgv52 [750.132563ms]
+Jun 20 10:20:54.353: INFO: Created: latency-svc-9smlm
+Jun 20 10:20:54.395: INFO: Got endpoints: latency-svc-5mx2n [752.368489ms]
+Jun 20 10:20:54.406: INFO: Created: latency-svc-bss8m
+Jun 20 10:20:54.442: INFO: Got endpoints: latency-svc-fbgvh [750.025289ms]
+Jun 20 10:20:54.453: INFO: Created: latency-svc-hwb2l
+Jun 20 10:20:54.492: INFO: Got endpoints: latency-svc-6wcqm [746.443935ms]
+Jun 20 10:20:54.506: INFO: Created: latency-svc-mpg4j
+Jun 20 10:20:54.543: INFO: Got endpoints: latency-svc-5dgr2 [750.523502ms]
+Jun 20 10:20:54.553: INFO: Created: latency-svc-bg8vc
+Jun 20 10:20:54.592: INFO: Got endpoints: latency-svc-hv94m [750.141302ms]
+Jun 20 10:20:54.605: INFO: Created: latency-svc-r8j7t
+Jun 20 10:20:54.642: INFO: Got endpoints: latency-svc-vsmwc [749.985423ms]
+Jun 20 10:20:54.657: INFO: Created: latency-svc-tsmhg
+Jun 20 10:20:54.692: INFO: Got endpoints: latency-svc-vdkhp [750.214801ms]
+Jun 20 10:20:54.706: INFO: Created: latency-svc-vhhw4
+Jun 20 10:20:54.742: INFO: Got endpoints: latency-svc-s4m7j [749.844242ms]
+Jun 20 10:20:54.751: INFO: Created: latency-svc-jzxht
+Jun 20 10:20:54.792: INFO: Got endpoints: latency-svc-tgtqp [748.55499ms]
+Jun 20 10:20:54.801: INFO: Created: latency-svc-c552d
+Jun 20 10:20:54.841: INFO: Got endpoints: latency-svc-r6g4l [749.089181ms]
+Jun 20 10:20:54.851: INFO: Created: latency-svc-z267g
+Jun 20 10:20:54.892: INFO: Got endpoints: latency-svc-vk9m9 [749.928037ms]
+Jun 20 10:20:54.907: INFO: Created: latency-svc-mgps2
+Jun 20 10:20:54.943: INFO: Got endpoints: latency-svc-4x97d [750.567739ms]
+Jun 20 10:20:54.954: INFO: Created: latency-svc-k5r28
+Jun 20 10:20:54.992: INFO: Got endpoints: latency-svc-ndhtc [749.654635ms]
+Jun 20 10:20:55.003: INFO: Created: latency-svc-qd7g5
+Jun 20 10:20:55.042: INFO: Got endpoints: latency-svc-zg2h8 [748.334978ms]
+Jun 20 10:20:55.052: INFO: Created: latency-svc-fd7f2
+Jun 20 10:20:55.092: INFO: Got endpoints: latency-svc-9smlm [749.221913ms]
+Jun 20 10:20:55.103: INFO: Created: latency-svc-d2b6k
+Jun 20 10:20:55.142: INFO: Got endpoints: latency-svc-bss8m [747.068632ms]
+Jun 20 10:20:55.152: INFO: Created: latency-svc-tg6q4
+Jun 20 10:20:55.193: INFO: Got endpoints: latency-svc-hwb2l [750.693525ms]
+Jun 20 10:20:55.203: INFO: Created: latency-svc-g7t88
+Jun 20 10:20:55.242: INFO: Got endpoints: latency-svc-mpg4j [750.339034ms]
+Jun 20 10:20:55.254: INFO: Created: latency-svc-s9xv8
+Jun 20 10:20:55.292: INFO: Got endpoints: latency-svc-bg8vc [748.996391ms]
+Jun 20 10:20:55.304: INFO: Created: latency-svc-lcbsm
+Jun 20 10:20:55.342: INFO: Got endpoints: latency-svc-r8j7t [750.077057ms]
+Jun 20 10:20:55.356: INFO: Created: latency-svc-ckzlm
+Jun 20 10:20:55.397: INFO: Got endpoints: latency-svc-tsmhg [755.063693ms]
+Jun 20 10:20:55.407: INFO: Created: latency-svc-8nddf
+Jun 20 10:20:55.442: INFO: Got endpoints: latency-svc-vhhw4 [749.987533ms]
+Jun 20 10:20:55.452: INFO: Created: latency-svc-kw4nr
+Jun 20 10:20:55.492: INFO: Got endpoints: latency-svc-jzxht [750.082602ms]
+Jun 20 10:20:55.503: INFO: Created: latency-svc-n8t6k
+Jun 20 10:20:55.544: INFO: Got endpoints: latency-svc-c552d [751.74519ms]
+Jun 20 10:20:55.555: INFO: Created: latency-svc-5v4hv
+Jun 20 10:20:55.593: INFO: Got endpoints: latency-svc-z267g [751.382987ms]
+Jun 20 10:20:55.603: INFO: Created: latency-svc-zt5zg
+Jun 20 10:20:55.642: INFO: Got endpoints: latency-svc-mgps2 [750.184271ms]
+Jun 20 10:20:55.653: INFO: Created: latency-svc-lg7cv
+Jun 20 10:20:55.692: INFO: Got endpoints: latency-svc-k5r28 [748.573319ms]
+Jun 20 10:20:55.703: INFO: Created: latency-svc-f84pd
+Jun 20 10:20:55.743: INFO: Got endpoints: latency-svc-qd7g5 [750.861656ms]
+Jun 20 10:20:55.754: INFO: Created: latency-svc-r4rrh
+Jun 20 10:20:55.792: INFO: Got endpoints: latency-svc-fd7f2 [749.685436ms]
+Jun 20 10:20:55.805: INFO: Created: latency-svc-rfndq
+Jun 20 10:20:55.842: INFO: Got endpoints: latency-svc-d2b6k [750.018732ms]
+Jun 20 10:20:55.853: INFO: Created: latency-svc-qz5ql
+Jun 20 10:20:55.892: INFO: Got endpoints: latency-svc-tg6q4 [750.452889ms]
+Jun 20 10:20:55.903: INFO: Created: latency-svc-4fccs
+Jun 20 10:20:55.944: INFO: Got endpoints: latency-svc-g7t88 [750.966545ms]
+Jun 20 10:20:55.959: INFO: Created: latency-svc-rfw7b
+Jun 20 10:20:55.992: INFO: Got endpoints: latency-svc-s9xv8 [749.657165ms]
+Jun 20 10:20:56.002: INFO: Created: latency-svc-dwj9f
+Jun 20 10:20:56.042: INFO: Got endpoints: latency-svc-lcbsm [750.584654ms]
+Jun 20 10:20:56.056: INFO: Created: latency-svc-52dcf
+Jun 20 10:20:56.093: INFO: Got endpoints: latency-svc-ckzlm [751.085384ms]
+Jun 20 10:20:56.109: INFO: Created: latency-svc-w8k5r
+Jun 20 10:20:56.143: INFO: Got endpoints: latency-svc-8nddf [746.467666ms]
+Jun 20 10:20:56.156: INFO: Created: latency-svc-5gpq7
+Jun 20 10:20:56.193: INFO: Got endpoints: latency-svc-kw4nr [750.291861ms]
+Jun 20 10:20:56.204: INFO: Created: latency-svc-bxk6v
+Jun 20 10:20:56.246: INFO: Got endpoints: latency-svc-n8t6k [753.733705ms]
+Jun 20 10:20:56.261: INFO: Created: latency-svc-h6k9z
+Jun 20 10:20:56.293: INFO: Got endpoints: latency-svc-5v4hv [749.735153ms]
+Jun 20 10:20:56.303: INFO: Created: latency-svc-lqr94
+Jun 20 10:20:56.342: INFO: Got endpoints: latency-svc-zt5zg [749.198596ms]
+Jun 20 10:20:56.353: INFO: Created: latency-svc-77s82
+Jun 20 10:20:56.393: INFO: Got endpoints: latency-svc-lg7cv [750.220904ms]
+Jun 20 10:20:56.403: INFO: Created: latency-svc-gvc2w
+Jun 20 10:20:56.443: INFO: Got endpoints: latency-svc-f84pd [751.385779ms]
+Jun 20 10:20:56.453: INFO: Created: latency-svc-8jl9p
+Jun 20 10:20:56.492: INFO: Got endpoints: latency-svc-r4rrh [748.869765ms]
+Jun 20 10:20:56.502: INFO: Created: latency-svc-xtfp7
+Jun 20 10:20:56.543: INFO: Got endpoints: latency-svc-rfndq [750.776707ms]
+Jun 20 10:20:56.553: INFO: Created: latency-svc-p4pjs
+Jun 20 10:20:56.592: INFO: Got endpoints: latency-svc-qz5ql [750.00994ms]
+Jun 20 10:20:56.604: INFO: Created: latency-svc-b7txk
+Jun 20 10:20:56.642: INFO: Got endpoints: latency-svc-4fccs [749.337839ms]
+Jun 20 10:20:56.652: INFO: Created: latency-svc-qpt7l
+Jun 20 10:20:56.693: INFO: Got endpoints: latency-svc-rfw7b [748.789779ms]
+Jun 20 10:20:56.704: INFO: Created: latency-svc-scfc6
+Jun 20 10:20:56.742: INFO: Got endpoints: latency-svc-dwj9f [749.087591ms]
+Jun 20 10:20:56.754: INFO: Created: latency-svc-j9cbw
+Jun 20 10:20:56.792: INFO: Got endpoints: latency-svc-52dcf [749.756312ms]
+Jun 20 10:20:56.802: INFO: Created: latency-svc-lqzrs
+Jun 20 10:20:56.846: INFO: Got endpoints: latency-svc-w8k5r [752.553327ms]
+Jun 20 10:20:56.858: INFO: Created: latency-svc-dbk2v
+Jun 20 10:20:56.892: INFO: Got endpoints: latency-svc-5gpq7 [749.066869ms]
+Jun 20 10:20:56.902: INFO: Created: latency-svc-dqq97
+Jun 20 10:20:56.942: INFO: Got endpoints: latency-svc-bxk6v [749.386443ms]
+Jun 20 10:20:56.951: INFO: Created: latency-svc-cmz2n
+Jun 20 10:20:56.993: INFO: Got endpoints: latency-svc-h6k9z [746.956906ms]
+Jun 20 10:20:57.003: INFO: Created: latency-svc-mdf9r
+Jun 20 10:20:57.043: INFO: Got endpoints: latency-svc-lqr94 [749.042552ms]
+Jun 20 10:20:57.053: INFO: Created: latency-svc-fsskb
+Jun 20 10:20:57.095: INFO: Got endpoints: latency-svc-77s82 [752.778583ms]
+Jun 20 10:20:57.106: INFO: Created: latency-svc-4zvnf
+Jun 20 10:20:57.143: INFO: Got endpoints: latency-svc-gvc2w [750.201647ms]
+Jun 20 10:20:57.153: INFO: Created: latency-svc-qbc7c
+Jun 20 10:20:57.192: INFO: Got endpoints: latency-svc-8jl9p [748.566855ms]
+Jun 20 10:20:57.206: INFO: Created: latency-svc-tvfvz
+Jun 20 10:20:57.243: INFO: Got endpoints: latency-svc-xtfp7 [750.554213ms]
+Jun 20 10:20:57.254: INFO: Created: latency-svc-s5j9s
+Jun 20 10:20:57.292: INFO: Got endpoints: latency-svc-p4pjs [748.986745ms]
+Jun 20 10:20:57.305: INFO: Created: latency-svc-8qx6b
+Jun 20 10:20:57.342: INFO: Got endpoints: latency-svc-b7txk [750.049871ms]
+Jun 20 10:20:57.353: INFO: Created: latency-svc-zltfl
+Jun 20 10:20:57.392: INFO: Got endpoints: latency-svc-qpt7l [750.65084ms]
+Jun 20 10:20:57.402: INFO: Created: latency-svc-8xcbq
+Jun 20 10:20:57.443: INFO: Got endpoints: latency-svc-scfc6 [749.78508ms]
+Jun 20 10:20:57.453: INFO: Created: latency-svc-vnfvm
+Jun 20 10:20:57.493: INFO: Got endpoints: latency-svc-j9cbw [750.976005ms]
+Jun 20 10:20:57.504: INFO: Created: latency-svc-znccx
+Jun 20 10:20:57.544: INFO: Got endpoints: latency-svc-lqzrs [752.114885ms]
+Jun 20 10:20:57.556: INFO: Created: latency-svc-fcvzl
+Jun 20 10:20:57.592: INFO: Got endpoints: latency-svc-dbk2v [745.998831ms]
+Jun 20 10:20:57.603: INFO: Created: latency-svc-xtfnt
+Jun 20 10:20:57.643: INFO: Got endpoints: latency-svc-dqq97 [750.364011ms]
+Jun 20 10:20:57.653: INFO: Created: latency-svc-f6bll
+Jun 20 10:20:57.693: INFO: Got endpoints: latency-svc-cmz2n [750.866152ms]
+Jun 20 10:20:57.705: INFO: Created: latency-svc-bm6gr
+Jun 20 10:20:57.742: INFO: Got endpoints: latency-svc-mdf9r [749.347501ms]
+Jun 20 10:20:57.754: INFO: Created: latency-svc-sj5z8
+Jun 20 10:20:57.792: INFO: Got endpoints: latency-svc-fsskb [748.678535ms]
+Jun 20 10:20:57.802: INFO: Created: latency-svc-4f6sz
+Jun 20 10:20:57.844: INFO: Got endpoints: latency-svc-4zvnf [749.143556ms]
+Jun 20 10:20:57.855: INFO: Created: latency-svc-f4lds
+Jun 20 10:20:57.892: INFO: Got endpoints: latency-svc-qbc7c [748.905237ms]
+Jun 20 10:20:57.904: INFO: Created: latency-svc-d2qct
+Jun 20 10:20:57.943: INFO: Got endpoints: latency-svc-tvfvz [751.044487ms]
+Jun 20 10:20:57.953: INFO: Created: latency-svc-868x7
+Jun 20 10:20:57.993: INFO: Got endpoints: latency-svc-s5j9s [749.794311ms]
+Jun 20 10:20:58.004: INFO: Created: latency-svc-77hk8
+Jun 20 10:20:58.042: INFO: Got endpoints: latency-svc-8qx6b [750.069595ms]
+Jun 20 10:20:58.050: INFO: Created: latency-svc-8jpwv
+Jun 20 10:20:58.092: INFO: Got endpoints: latency-svc-zltfl [749.86397ms]
+Jun 20 10:20:58.104: INFO: Created: latency-svc-pqjwv
+Jun 20 10:20:58.142: INFO: Got endpoints: latency-svc-8xcbq [749.730491ms]
+Jun 20 10:20:58.153: INFO: Created: latency-svc-n7kgp
+Jun 20 10:20:58.193: INFO: Got endpoints: latency-svc-vnfvm [749.887714ms]
+Jun 20 10:20:58.204: INFO: Created: latency-svc-9nkn5
+Jun 20 10:20:58.244: INFO: Got endpoints: latency-svc-znccx [750.760679ms]
+Jun 20 10:20:58.255: INFO: Created: latency-svc-xc58r
+Jun 20 10:20:58.292: INFO: Got endpoints: latency-svc-fcvzl [747.647976ms]
+Jun 20 10:20:58.306: INFO: Created: latency-svc-wj8jn
+Jun 20 10:20:58.342: INFO: Got endpoints: latency-svc-xtfnt [749.363964ms]
+Jun 20 10:20:58.352: INFO: Created: latency-svc-jw9sx
+Jun 20 10:20:58.393: INFO: Got endpoints: latency-svc-f6bll [749.856204ms]
+Jun 20 10:20:58.404: INFO: Created: latency-svc-jhgk9
+Jun 20 10:20:58.442: INFO: Got endpoints: latency-svc-bm6gr [748.952709ms]
+Jun 20 10:20:58.453: INFO: Created: latency-svc-2cjgd
+Jun 20 10:20:58.492: INFO: Got endpoints: latency-svc-sj5z8 [749.713015ms]
+Jun 20 10:20:58.502: INFO: Created: latency-svc-stqff
+Jun 20 10:20:58.543: INFO: Got endpoints: latency-svc-4f6sz [751.745241ms]
+Jun 20 10:20:58.555: INFO: Created: latency-svc-6s5kt
+Jun 20 10:20:58.592: INFO: Got endpoints: latency-svc-f4lds [747.83736ms]
+Jun 20 10:20:58.604: INFO: Created: latency-svc-cch6p
+Jun 20 10:20:58.642: INFO: Got endpoints: latency-svc-d2qct [749.972061ms]
+Jun 20 10:20:58.674: INFO: Created: latency-svc-s5c8z
+Jun 20 10:20:58.695: INFO: Got endpoints: latency-svc-868x7 [751.621807ms]
+Jun 20 10:20:58.708: INFO: Created: latency-svc-5tpmg
+Jun 20 10:20:58.745: INFO: Got endpoints: latency-svc-77hk8 [752.669886ms]
+Jun 20 10:20:58.769: INFO: Created: latency-svc-pwnmk
+Jun 20 10:20:58.794: INFO: Got endpoints: latency-svc-8jpwv [751.795371ms]
+Jun 20 10:20:58.804: INFO: Created: latency-svc-b685p
+Jun 20 10:20:58.843: INFO: Got endpoints: latency-svc-pqjwv [749.86975ms]
+Jun 20 10:20:58.856: INFO: Created: latency-svc-54b75
+Jun 20 10:20:58.892: INFO: Got endpoints: latency-svc-n7kgp [749.238369ms]
+Jun 20 10:20:58.903: INFO: Created: latency-svc-pp5b6
+Jun 20 10:20:58.943: INFO: Got endpoints: latency-svc-9nkn5 [749.905578ms]
+Jun 20 10:20:58.952: INFO: Created: latency-svc-w6lf8
+Jun 20 10:20:58.996: INFO: Got endpoints: latency-svc-xc58r [751.945139ms]
+Jun 20 10:20:59.008: INFO: Created: latency-svc-cpj5f
+Jun 20 10:20:59.042: INFO: Got endpoints: latency-svc-wj8jn [749.971455ms]
+Jun 20 10:20:59.054: INFO: Created: latency-svc-74jlk
+Jun 20 10:20:59.093: INFO: Got endpoints: latency-svc-jw9sx [751.521909ms]
+Jun 20 10:20:59.103: INFO: Created: latency-svc-s9x88
+Jun 20 10:20:59.142: INFO: Got endpoints: latency-svc-jhgk9 [749.674145ms]
+Jun 20 10:20:59.152: INFO: Created: latency-svc-wh8f4
+Jun 20 10:20:59.192: INFO: Got endpoints: latency-svc-2cjgd [749.979623ms]
+Jun 20 10:20:59.203: INFO: Created: latency-svc-mkzcf
+Jun 20 10:20:59.243: INFO: Got endpoints: latency-svc-stqff [750.76985ms]
+Jun 20 10:20:59.253: INFO: Created: latency-svc-nddhj
+Jun 20 10:20:59.292: INFO: Got endpoints: latency-svc-6s5kt [748.399107ms]
+Jun 20 10:20:59.301: INFO: Created: latency-svc-wmqcl
+Jun 20 10:20:59.344: INFO: Got endpoints: latency-svc-cch6p [751.512119ms]
+Jun 20 10:20:59.355: INFO: Created: latency-svc-6x6vn
+Jun 20 10:20:59.393: INFO: Got endpoints: latency-svc-s5c8z [751.003378ms]
+Jun 20 10:20:59.404: INFO: Created: latency-svc-p5hwn
+Jun 20 10:20:59.442: INFO: Got endpoints: latency-svc-5tpmg [746.851269ms]
+Jun 20 10:20:59.452: INFO: Created: latency-svc-tmbgk
+Jun 20 10:20:59.491: INFO: Got endpoints: latency-svc-pwnmk [746.062864ms]
+Jun 20 10:20:59.505: INFO: Created: latency-svc-ksxrz
+Jun 20 10:20:59.543: INFO: Got endpoints: latency-svc-b685p [748.64632ms]
+Jun 20 10:20:59.552: INFO: Created: latency-svc-rlpjh
+Jun 20 10:20:59.599: INFO: Got endpoints: latency-svc-54b75 [755.804708ms]
+Jun 20 10:20:59.610: INFO: Created: latency-svc-hs777
+Jun 20 10:20:59.642: INFO: Got endpoints: latency-svc-pp5b6 [750.484416ms]
+Jun 20 10:20:59.654: INFO: Created: latency-svc-8d8hq
+Jun 20 10:20:59.698: INFO: Got endpoints: latency-svc-w6lf8 [755.028065ms]
+Jun 20 10:20:59.709: INFO: Created: latency-svc-zr6dx
+Jun 20 10:20:59.742: INFO: Got endpoints: latency-svc-cpj5f [746.235339ms]
+Jun 20 10:20:59.752: INFO: Created: latency-svc-74t5h
+Jun 20 10:20:59.792: INFO: Got endpoints: latency-svc-74jlk [750.36711ms]
+Jun 20 10:20:59.802: INFO: Created: latency-svc-9l7t5
+Jun 20 10:20:59.843: INFO: Got endpoints: latency-svc-s9x88 [749.735169ms]
+Jun 20 10:20:59.852: INFO: Created: latency-svc-k2fgv
+Jun 20 10:20:59.892: INFO: Got endpoints: latency-svc-wh8f4 [749.298228ms]
+Jun 20 10:20:59.903: INFO: Created: latency-svc-4qx5z
+Jun 20 10:20:59.944: INFO: Got endpoints: latency-svc-mkzcf [751.782071ms]
+Jun 20 10:20:59.955: INFO: Created: latency-svc-lmxnl
+Jun 20 10:20:59.992: INFO: Got endpoints: latency-svc-nddhj [749.183813ms]
+Jun 20 10:21:00.004: INFO: Created: latency-svc-sv79j
+Jun 20 10:21:00.042: INFO: Got endpoints: latency-svc-wmqcl [750.526497ms]
+Jun 20 10:21:00.053: INFO: Created: latency-svc-pltqj
+Jun 20 10:21:00.094: INFO: Got endpoints: latency-svc-6x6vn [749.937743ms]
+Jun 20 10:21:00.105: INFO: Created: latency-svc-rtqmm
+Jun 20 10:21:00.142: INFO: Got endpoints: latency-svc-p5hwn [749.278911ms]
+Jun 20 10:21:00.153: INFO: Created: latency-svc-7nz4g
+Jun 20 10:21:00.193: INFO: Got endpoints: latency-svc-tmbgk [751.377385ms]
+Jun 20 10:21:00.206: INFO: Created: latency-svc-fr4wn
+Jun 20 10:21:00.246: INFO: Got endpoints: latency-svc-ksxrz [754.22219ms]
+Jun 20 10:21:00.259: INFO: Created: latency-svc-qfm29
+Jun 20 10:21:00.293: INFO: Got endpoints: latency-svc-rlpjh [749.859823ms]
+Jun 20 10:21:00.306: INFO: Created: latency-svc-c9xgf
+Jun 20 10:21:00.342: INFO: Got endpoints: latency-svc-hs777 [743.138302ms]
+Jun 20 10:21:00.352: INFO: Created: latency-svc-d65nw
+Jun 20 10:21:00.393: INFO: Got endpoints: latency-svc-8d8hq [750.641753ms]
+Jun 20 10:21:00.405: INFO: Created: latency-svc-tb6fb
+Jun 20 10:21:00.443: INFO: Got endpoints: latency-svc-zr6dx [745.191208ms]
+Jun 20 10:21:00.455: INFO: Created: latency-svc-4xn5r
+Jun 20 10:21:00.492: INFO: Got endpoints: latency-svc-74t5h [749.282556ms]
+Jun 20 10:21:00.502: INFO: Created: latency-svc-w549x
+Jun 20 10:21:00.542: INFO: Got endpoints: latency-svc-9l7t5 [750.078417ms]
+Jun 20 10:21:00.553: INFO: Created: latency-svc-2gbbp
+Jun 20 10:21:00.592: INFO: Got endpoints: latency-svc-k2fgv [749.017325ms]
+Jun 20 10:21:00.602: INFO: Created: latency-svc-gnbgp
+Jun 20 10:21:00.645: INFO: Got endpoints: latency-svc-4qx5z [752.680114ms]
+Jun 20 10:21:00.655: INFO: Created: latency-svc-sklrn
+Jun 20 10:21:00.691: INFO: Got endpoints: latency-svc-lmxnl [747.190632ms]
+Jun 20 10:21:00.704: INFO: Created: latency-svc-q4rt5
+Jun 20 10:21:00.742: INFO: Got endpoints: latency-svc-sv79j [750.178581ms]
+Jun 20 10:21:00.753: INFO: Created: latency-svc-46bq9
+Jun 20 10:21:00.793: INFO: Got endpoints: latency-svc-pltqj [750.363941ms]
+Jun 20 10:21:00.802: INFO: Created: latency-svc-djvjd
+Jun 20 10:21:00.843: INFO: Got endpoints: latency-svc-rtqmm [749.224466ms]
+Jun 20 10:21:00.854: INFO: Created: latency-svc-lvh5j
+Jun 20 10:21:00.892: INFO: Got endpoints: latency-svc-7nz4g [749.803229ms]
+Jun 20 10:21:00.903: INFO: Created: latency-svc-k6cmp
+Jun 20 10:21:00.943: INFO: Got endpoints: latency-svc-fr4wn [749.226796ms]
+Jun 20 10:21:00.953: INFO: Created: latency-svc-n49w5
+Jun 20 10:21:00.993: INFO: Got endpoints: latency-svc-qfm29 [746.824638ms]
+Jun 20 10:21:01.042: INFO: Got endpoints: latency-svc-c9xgf [749.644373ms]
+Jun 20 10:21:01.092: INFO: Got endpoints: latency-svc-d65nw [750.555587ms]
+Jun 20 10:21:01.142: INFO: Got endpoints: latency-svc-tb6fb [749.106523ms]
+Jun 20 10:21:01.192: INFO: Got endpoints: latency-svc-4xn5r [748.523462ms]
+Jun 20 10:21:01.245: INFO: Got endpoints: latency-svc-w549x [753.025137ms]
+Jun 20 10:21:01.292: INFO: Got endpoints: latency-svc-2gbbp [749.435327ms]
+Jun 20 10:21:01.342: INFO: Got endpoints: latency-svc-gnbgp [750.059964ms]
+Jun 20 10:21:01.392: INFO: Got endpoints: latency-svc-sklrn [747.650836ms]
+Jun 20 10:21:01.442: INFO: Got endpoints: latency-svc-q4rt5 [750.70849ms]
+Jun 20 10:21:01.494: INFO: Got endpoints: latency-svc-46bq9 [751.494093ms]
+Jun 20 10:21:01.542: INFO: Got endpoints: latency-svc-djvjd [749.108791ms]
+Jun 20 10:21:01.593: INFO: Got endpoints: latency-svc-lvh5j [750.268947ms]
+Jun 20 10:21:01.642: INFO: Got endpoints: latency-svc-k6cmp [749.461118ms]
+Jun 20 10:21:01.694: INFO: Got endpoints: latency-svc-n49w5 [751.824111ms]
+Jun 20 10:21:01.695: INFO: Latencies: [22.45548ms 26.917421ms 42.855205ms 44.792584ms 54.214207ms 60.066355ms 65.886369ms 75.015129ms 80.113216ms 88.727693ms 96.298597ms 102.125504ms 102.956323ms 107.500681ms 108.155854ms 108.500119ms 110.231331ms 110.874315ms 112.281446ms 112.972742ms 114.321549ms 114.640906ms 115.510166ms 117.394251ms 117.502731ms 117.800536ms 119.275913ms 119.629457ms 120.53771ms 121.814586ms 122.710844ms 125.839388ms 126.384799ms 126.473049ms 150.001932ms 183.72381ms 214.512832ms 272.925031ms 303.121244ms 345.614161ms 387.568991ms 422.8379ms 468.495873ms 510.183126ms 552.509567ms 593.911051ms 636.863956ms 677.517669ms 719.196885ms 736.452276ms 743.138302ms 745.191208ms 745.998831ms 746.062864ms 746.235339ms 746.443935ms 746.467666ms 746.824638ms 746.851269ms 746.956906ms 747.068632ms 747.190632ms 747.404589ms 747.647976ms 747.650836ms 747.83736ms 748.334978ms 748.399107ms 748.523462ms 748.55499ms 748.566855ms 748.573319ms 748.64632ms 748.678535ms 748.789779ms 748.869765ms 748.905237ms 748.952709ms 748.986745ms 748.996391ms 749.017325ms 749.042552ms 749.066869ms 749.087591ms 749.089181ms 749.106523ms 749.108791ms 749.143556ms 749.183813ms 749.198596ms 749.221913ms 749.224466ms 749.226796ms 749.238369ms 749.278911ms 749.282556ms 749.298228ms 749.337839ms 749.347501ms 749.363964ms 749.386443ms 749.435327ms 749.461118ms 749.644373ms 749.654635ms 749.657165ms 749.674145ms 749.685436ms 749.713015ms 749.730491ms 749.735153ms 749.735169ms 749.756312ms 749.78508ms 749.794311ms 749.803229ms 749.844242ms 749.856204ms 749.859823ms 749.86397ms 749.86975ms 749.887714ms 749.905578ms 749.928037ms 749.937743ms 749.971455ms 749.972061ms 749.979623ms 749.985423ms 749.987533ms 750.00994ms 750.018732ms 750.025289ms 750.049871ms 750.059964ms 750.069595ms 750.077057ms 750.078417ms 750.082602ms 750.132563ms 750.141302ms 750.178581ms 750.184271ms 750.201647ms 750.214801ms 750.220904ms 750.264694ms 750.268947ms 750.291861ms 750.339034ms 750.363941ms 750.364011ms 750.36711ms 750.452889ms 750.484416ms 750.523502ms 750.526497ms 750.554213ms 750.555587ms 750.567739ms 750.584654ms 750.641753ms 750.65084ms 750.693525ms 750.70849ms 750.760679ms 750.76985ms 750.776707ms 750.861656ms 750.866152ms 750.966545ms 750.976005ms 751.003378ms 751.044487ms 751.085384ms 751.377385ms 751.382987ms 751.385779ms 751.494093ms 751.512119ms 751.521909ms 751.621807ms 751.74519ms 751.745241ms 751.782071ms 751.795371ms 751.824111ms 751.945139ms 752.114885ms 752.368489ms 752.553327ms 752.669886ms 752.680114ms 752.778583ms 753.025137ms 753.733705ms 754.22219ms 755.028065ms 755.063693ms 755.804708ms]
+Jun 20 10:21:01.695: INFO: 50 %ile: 749.386443ms
+Jun 20 10:21:01.695: INFO: 90 %ile: 751.521909ms
+Jun 20 10:21:01.695: INFO: 99 %ile: 755.063693ms
+Jun 20 10:21:01.695: INFO: Total sample count: 200
+[AfterEach] [sig-network] Service endpoints latency
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:21:01.695: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "svc-latency-1427" for this suite.
+Jun 20 10:21:21.711: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:21:21.780: INFO: namespace svc-latency-1427 deletion completed in 20.081407001s
+
+• [SLOW TEST:30.844 seconds]
+[sig-network] Service endpoints latency
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
+  should not be very high  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSS
+------------------------------
+[k8s.io] Variable Expansion 
+  should allow substituting values in a container's command [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:21:21.780: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename var-expansion
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow substituting values in a container's command [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test substitution in container's command
+Jun 20 10:21:21.821: INFO: Waiting up to 5m0s for pod "var-expansion-4451bfa6-2498-44dd-b912-016fe27c7aec" in namespace "var-expansion-3434" to be "success or failure"
+Jun 20 10:21:21.824: INFO: Pod "var-expansion-4451bfa6-2498-44dd-b912-016fe27c7aec": Phase="Pending", Reason="", readiness=false. Elapsed: 2.97783ms
+Jun 20 10:21:23.828: INFO: Pod "var-expansion-4451bfa6-2498-44dd-b912-016fe27c7aec": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006302925s
+STEP: Saw pod success
+Jun 20 10:21:23.828: INFO: Pod "var-expansion-4451bfa6-2498-44dd-b912-016fe27c7aec" satisfied condition "success or failure"
+Jun 20 10:21:23.836: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod var-expansion-4451bfa6-2498-44dd-b912-016fe27c7aec container dapi-container: 
+STEP: delete the pod
+Jun 20 10:21:23.868: INFO: Waiting for pod var-expansion-4451bfa6-2498-44dd-b912-016fe27c7aec to disappear
+Jun 20 10:21:23.875: INFO: Pod var-expansion-4451bfa6-2498-44dd-b912-016fe27c7aec no longer exists
+[AfterEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:21:23.875: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "var-expansion-3434" for this suite.
+Jun 20 10:21:29.900: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:21:29.966: INFO: namespace var-expansion-3434 deletion completed in 6.083294714s
+
+• [SLOW TEST:8.186 seconds]
+[k8s.io] Variable Expansion
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should allow substituting values in a container's command [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:21:29.966: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir 0666 on tmpfs
+Jun 20 10:21:30.008: INFO: Waiting up to 5m0s for pod "pod-9032e8d2-5f0a-42e7-a4bf-f137486489eb" in namespace "emptydir-3566" to be "success or failure"
+Jun 20 10:21:30.011: INFO: Pod "pod-9032e8d2-5f0a-42e7-a4bf-f137486489eb": Phase="Pending", Reason="", readiness=false. Elapsed: 3.324203ms
+Jun 20 10:21:32.015: INFO: Pod "pod-9032e8d2-5f0a-42e7-a4bf-f137486489eb": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006736765s
+STEP: Saw pod success
+Jun 20 10:21:32.015: INFO: Pod "pod-9032e8d2-5f0a-42e7-a4bf-f137486489eb" satisfied condition "success or failure"
+Jun 20 10:21:32.017: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-9032e8d2-5f0a-42e7-a4bf-f137486489eb container test-container: 
+STEP: delete the pod
+Jun 20 10:21:32.033: INFO: Waiting for pod pod-9032e8d2-5f0a-42e7-a4bf-f137486489eb to disappear
+Jun 20 10:21:32.037: INFO: Pod pod-9032e8d2-5f0a-42e7-a4bf-f137486489eb no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:21:32.037: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-3566" for this suite.
+Jun 20 10:21:38.051: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:21:38.128: INFO: namespace emptydir-3566 deletion completed in 6.088352521s
+
+• [SLOW TEST:8.162 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Job 
+  should delete a job [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] Job
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:21:38.128: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename job
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should delete a job [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a job
+STEP: Ensuring active pods == parallelism
+STEP: delete a job
+STEP: deleting Job.batch foo in namespace job-9765, will wait for the garbage collector to delete the pods
+Jun 20 10:21:42.234: INFO: Deleting Job.batch foo took: 6.992634ms
+Jun 20 10:21:42.534: INFO: Terminating Job.batch foo pods took: 300.307702ms
+STEP: Ensuring job was deleted
+[AfterEach] [sig-apps] Job
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:22:17.237: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "job-9765" for this suite.
+Jun 20 10:22:23.250: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:22:23.327: INFO: namespace job-9765 deletion completed in 6.08713437s
+
+• [SLOW TEST:45.199 seconds]
+[sig-apps] Job
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should delete a job [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSS
+------------------------------
+[k8s.io] Container Runtime blackbox test on terminated container 
+  should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:22:23.327: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-runtime
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the container
+STEP: wait for the container to reach Succeeded
+STEP: get the container status
+STEP: the container should be terminated
+STEP: the termination message should be set
+Jun 20 10:22:25.392: INFO: Expected: &{DONE} to match Container's Termination Message: DONE --
+STEP: delete the container
+[AfterEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:22:25.405: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-runtime-9114" for this suite.
+Jun 20 10:22:31.418: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:22:31.483: INFO: namespace container-runtime-9114 deletion completed in 6.074901942s
+
+• [SLOW TEST:8.156 seconds]
+[k8s.io] Container Runtime
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  blackbox test
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:38
+    on terminated container
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:129
+      should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]
+      /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with projected pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:22:31.483: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename subpath
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:37
+STEP: Setting up data
+[It] should support subpaths with projected pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating pod pod-subpath-test-projected-qrd5
+STEP: Creating a pod to test atomic-volume-subpath
+Jun 20 10:22:31.540: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-qrd5" in namespace "subpath-2442" to be "success or failure"
+Jun 20 10:22:31.543: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Pending", Reason="", readiness=false. Elapsed: 3.334545ms
+Jun 20 10:22:33.546: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006559679s
+Jun 20 10:22:35.550: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Running", Reason="", readiness=true. Elapsed: 4.009831454s
+Jun 20 10:22:37.553: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Running", Reason="", readiness=true. Elapsed: 6.013242755s
+Jun 20 10:22:39.557: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Running", Reason="", readiness=true. Elapsed: 8.016935544s
+Jun 20 10:22:41.560: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Running", Reason="", readiness=true. Elapsed: 10.020351409s
+Jun 20 10:22:43.564: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Running", Reason="", readiness=true. Elapsed: 12.024473788s
+Jun 20 10:22:45.567: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Running", Reason="", readiness=true. Elapsed: 14.02755361s
+Jun 20 10:22:47.571: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Running", Reason="", readiness=true. Elapsed: 16.030794567s
+Jun 20 10:22:49.574: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Running", Reason="", readiness=true. Elapsed: 18.033950515s
+Jun 20 10:22:51.577: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Running", Reason="", readiness=true. Elapsed: 20.037059629s
+Jun 20 10:22:53.582: INFO: Pod "pod-subpath-test-projected-qrd5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.042086648s
+STEP: Saw pod success
+Jun 20 10:22:53.582: INFO: Pod "pod-subpath-test-projected-qrd5" satisfied condition "success or failure"
+Jun 20 10:22:53.585: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-subpath-test-projected-qrd5 container test-container-subpath-projected-qrd5: 
+STEP: delete the pod
+Jun 20 10:22:53.602: INFO: Waiting for pod pod-subpath-test-projected-qrd5 to disappear
+Jun 20 10:22:53.605: INFO: Pod pod-subpath-test-projected-qrd5 no longer exists
+STEP: Deleting pod pod-subpath-test-projected-qrd5
+Jun 20 10:22:53.605: INFO: Deleting pod "pod-subpath-test-projected-qrd5" in namespace "subpath-2442"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:22:53.608: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "subpath-2442" for this suite.
+Jun 20 10:22:59.620: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:22:59.685: INFO: namespace subpath-2442 deletion completed in 6.074520052s
+
+• [SLOW TEST:28.202 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:33
+    should support subpaths with projected pod [LinuxOnly] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl label 
+  should update the label on a resource  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:22:59.685: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[BeforeEach] [k8s.io] Kubectl label
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1211
+STEP: creating the pod
+Jun 20 10:22:59.722: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-5697'
+Jun 20 10:23:00.016: INFO: stderr: ""
+Jun 20 10:23:00.016: INFO: stdout: "pod/pause created\n"
+Jun 20 10:23:00.016: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause]
+Jun 20 10:23:00.016: INFO: Waiting up to 5m0s for pod "pause" in namespace "kubectl-5697" to be "running and ready"
+Jun 20 10:23:00.020: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 4.03973ms
+Jun 20 10:23:02.023: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007544228s
+Jun 20 10:23:04.026: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 4.010542782s
+Jun 20 10:23:04.026: INFO: Pod "pause" satisfied condition "running and ready"
+Jun 20 10:23:04.026: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [pause]
+[It] should update the label on a resource  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: adding the label testing-label with value testing-label-value to a pod
+Jun 20 10:23:04.026: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 label pods pause testing-label=testing-label-value --namespace=kubectl-5697'
+Jun 20 10:23:04.110: INFO: stderr: ""
+Jun 20 10:23:04.110: INFO: stdout: "pod/pause labeled\n"
+STEP: verifying the pod has the label testing-label with the value testing-label-value
+Jun 20 10:23:04.110: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pod pause -L testing-label --namespace=kubectl-5697'
+Jun 20 10:23:04.290: INFO: stderr: ""
+Jun 20 10:23:04.290: INFO: stdout: "NAME    READY   STATUS    RESTARTS   AGE   TESTING-LABEL\npause   1/1     Running   0          4s    testing-label-value\n"
+STEP: removing the label testing-label of a pod
+Jun 20 10:23:04.290: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 label pods pause testing-label- --namespace=kubectl-5697'
+Jun 20 10:23:04.388: INFO: stderr: ""
+Jun 20 10:23:04.388: INFO: stdout: "pod/pause labeled\n"
+STEP: verifying the pod doesn't have the label testing-label
+Jun 20 10:23:04.388: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pod pause -L testing-label --namespace=kubectl-5697'
+Jun 20 10:23:04.484: INFO: stderr: ""
+Jun 20 10:23:04.484: INFO: stdout: "NAME    READY   STATUS    RESTARTS   AGE   TESTING-LABEL\npause   1/1     Running   0          4s    \n"
+[AfterEach] [k8s.io] Kubectl label
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1218
+STEP: using delete to clean up resources
+Jun 20 10:23:04.485: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete --grace-period=0 --force -f - --namespace=kubectl-5697'
+Jun 20 10:23:04.612: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 20 10:23:04.612: INFO: stdout: "pod \"pause\" force deleted\n"
+Jun 20 10:23:04.612: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get rc,svc -l name=pause --no-headers --namespace=kubectl-5697'
+Jun 20 10:23:04.702: INFO: stderr: "No resources found.\n"
+Jun 20 10:23:04.702: INFO: stdout: ""
+Jun 20 10:23:04.702: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -l name=pause --namespace=kubectl-5697 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun 20 10:23:04.781: INFO: stderr: ""
+Jun 20 10:23:04.782: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:23:04.782: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-5697" for this suite.
+Jun 20 10:23:10.795: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:23:10.865: INFO: namespace kubectl-5697 deletion completed in 6.079559735s
+
+• [SLOW TEST:11.179 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl label
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should update the label on a resource  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSS
+------------------------------
+[sig-storage] Downward API volume 
+  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:23:10.865: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:23:10.905: INFO: Waiting up to 5m0s for pod "downwardapi-volume-2376dee4-dec9-4b89-b73b-c0dad634d7b9" in namespace "downward-api-768" to be "success or failure"
+Jun 20 10:23:10.909: INFO: Pod "downwardapi-volume-2376dee4-dec9-4b89-b73b-c0dad634d7b9": Phase="Pending", Reason="", readiness=false. Elapsed: 3.55953ms
+Jun 20 10:23:12.912: INFO: Pod "downwardapi-volume-2376dee4-dec9-4b89-b73b-c0dad634d7b9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006758525s
+STEP: Saw pod success
+Jun 20 10:23:12.912: INFO: Pod "downwardapi-volume-2376dee4-dec9-4b89-b73b-c0dad634d7b9" satisfied condition "success or failure"
+Jun 20 10:23:12.914: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-2376dee4-dec9-4b89-b73b-c0dad634d7b9 container client-container: 
+STEP: delete the pod
+Jun 20 10:23:12.931: INFO: Waiting for pod downwardapi-volume-2376dee4-dec9-4b89-b73b-c0dad634d7b9 to disappear
+Jun 20 10:23:12.934: INFO: Pod downwardapi-volume-2376dee4-dec9-4b89-b73b-c0dad634d7b9 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:23:12.934: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-768" for this suite.
+Jun 20 10:23:18.947: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:23:19.013: INFO: namespace downward-api-768 deletion completed in 6.076587114s
+
+• [SLOW TEST:8.148 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] Secrets 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:23:19.013: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating secret with name s-test-opt-del-fd87d3ed-8429-4e25-b581-af1041feb5b0
+STEP: Creating secret with name s-test-opt-upd-4c194909-8bb6-4599-b931-3bdc66c10e46
+STEP: Creating the pod
+STEP: Deleting secret s-test-opt-del-fd87d3ed-8429-4e25-b581-af1041feb5b0
+STEP: Updating secret s-test-opt-upd-4c194909-8bb6-4599-b931-3bdc66c10e46
+STEP: Creating secret with name s-test-opt-create-7c60b32e-e801-418f-a2a1-a5b7e158d07b
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:24:43.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-6056" for this suite.
+Jun 20 10:25:05.496: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:25:05.576: INFO: namespace secrets-6056 deletion completed in 22.089561117s
+
+• [SLOW TEST:106.563 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should retry creating failed daemon pods [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:25:05.576: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename daemonsets
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:103
+[It] should retry creating failed daemon pods [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a simple DaemonSet "daemon-set"
+STEP: Check that daemon pods launch on every node of the cluster.
+Jun 20 10:25:05.629: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:05.629: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:05.630: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:05.632: INFO: Number of nodes with available pods: 0
+Jun 20 10:25:05.633: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:25:06.636: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:06.636: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:06.636: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:06.639: INFO: Number of nodes with available pods: 0
+Jun 20 10:25:06.639: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:25:07.636: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:07.636: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:07.636: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:07.639: INFO: Number of nodes with available pods: 2
+Jun 20 10:25:07.639: INFO: Number of running nodes: 2, number of available pods: 2
+STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.
+Jun 20 10:25:07.653: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:07.653: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:07.653: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:25:07.655: INFO: Number of nodes with available pods: 2
+Jun 20 10:25:07.655: INFO: Number of running nodes: 2, number of available pods: 2
+STEP: Wait for the failed daemon pod to be completely deleted.
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:69
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-1542, will wait for the garbage collector to delete the pods
+Jun 20 10:25:08.723: INFO: Deleting DaemonSet.extensions daemon-set took: 6.744853ms
+Jun 20 10:25:09.024: INFO: Terminating DaemonSet.extensions daemon-set pods took: 300.218321ms
+Jun 20 10:26:27.227: INFO: Number of nodes with available pods: 0
+Jun 20 10:26:27.227: INFO: Number of running nodes: 0, number of available pods: 0
+Jun 20 10:26:27.229: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-1542/daemonsets","resourceVersion":"13829"},"items":null}
+
+Jun 20 10:26:27.231: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-1542/pods","resourceVersion":"13829"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:26:27.238: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-1542" for this suite.
+Jun 20 10:26:33.252: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:26:33.319: INFO: namespace daemonsets-1542 deletion completed in 6.078924377s
+
+• [SLOW TEST:87.743 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should retry creating failed daemon pods [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with configmap pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:26:33.319: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename subpath
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:37
+STEP: Setting up data
+[It] should support subpaths with configmap pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating pod pod-subpath-test-configmap-9gmm
+STEP: Creating a pod to test atomic-volume-subpath
+Jun 20 10:26:33.366: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-9gmm" in namespace "subpath-5854" to be "success or failure"
+Jun 20 10:26:33.370: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Pending", Reason="", readiness=false. Elapsed: 4.060836ms
+Jun 20 10:26:35.373: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007190914s
+Jun 20 10:26:37.376: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Running", Reason="", readiness=true. Elapsed: 4.010384316s
+Jun 20 10:26:39.380: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Running", Reason="", readiness=true. Elapsed: 6.013817005s
+Jun 20 10:26:41.383: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Running", Reason="", readiness=true. Elapsed: 8.016770662s
+Jun 20 10:26:43.386: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Running", Reason="", readiness=true. Elapsed: 10.020069939s
+Jun 20 10:26:45.389: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Running", Reason="", readiness=true. Elapsed: 12.023434431s
+Jun 20 10:26:47.392: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Running", Reason="", readiness=true. Elapsed: 14.026469697s
+Jun 20 10:26:49.396: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Running", Reason="", readiness=true. Elapsed: 16.02986456s
+Jun 20 10:26:51.399: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Running", Reason="", readiness=true. Elapsed: 18.0328774s
+Jun 20 10:26:53.404: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Running", Reason="", readiness=true. Elapsed: 20.037762937s
+Jun 20 10:26:55.407: INFO: Pod "pod-subpath-test-configmap-9gmm": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.040793481s
+STEP: Saw pod success
+Jun 20 10:26:55.407: INFO: Pod "pod-subpath-test-configmap-9gmm" satisfied condition "success or failure"
+Jun 20 10:26:55.409: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-subpath-test-configmap-9gmm container test-container-subpath-configmap-9gmm: 
+STEP: delete the pod
+Jun 20 10:26:55.429: INFO: Waiting for pod pod-subpath-test-configmap-9gmm to disappear
+Jun 20 10:26:55.432: INFO: Pod pod-subpath-test-configmap-9gmm no longer exists
+STEP: Deleting pod pod-subpath-test-configmap-9gmm
+Jun 20 10:26:55.432: INFO: Deleting pod "pod-subpath-test-configmap-9gmm" in namespace "subpath-5854"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:26:55.434: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "subpath-5854" for this suite.
+Jun 20 10:27:01.448: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:27:01.557: INFO: namespace subpath-5854 deletion completed in 6.119997968s
+
+• [SLOW TEST:28.238 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:33
+    should support subpaths with configmap pod [LinuxOnly] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SS
+------------------------------
+[sig-api-machinery] Secrets 
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:27:01.557: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating secret secrets-6212/secret-test-fe330744-9dc5-4876-924a-10a9ccebaf61
+STEP: Creating a pod to test consume secrets
+Jun 20 10:27:01.608: INFO: Waiting up to 5m0s for pod "pod-configmaps-c8efd542-e524-48b8-b395-a6c62b77680a" in namespace "secrets-6212" to be "success or failure"
+Jun 20 10:27:01.611: INFO: Pod "pod-configmaps-c8efd542-e524-48b8-b395-a6c62b77680a": Phase="Pending", Reason="", readiness=false. Elapsed: 3.268746ms
+Jun 20 10:27:03.615: INFO: Pod "pod-configmaps-c8efd542-e524-48b8-b395-a6c62b77680a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006557635s
+Jun 20 10:27:05.618: INFO: Pod "pod-configmaps-c8efd542-e524-48b8-b395-a6c62b77680a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009814783s
+STEP: Saw pod success
+Jun 20 10:27:05.618: INFO: Pod "pod-configmaps-c8efd542-e524-48b8-b395-a6c62b77680a" satisfied condition "success or failure"
+Jun 20 10:27:05.620: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-configmaps-c8efd542-e524-48b8-b395-a6c62b77680a container env-test: 
+STEP: delete the pod
+Jun 20 10:27:05.641: INFO: Waiting for pod pod-configmaps-c8efd542-e524-48b8-b395-a6c62b77680a to disappear
+Jun 20 10:27:05.647: INFO: Pod pod-configmaps-c8efd542-e524-48b8-b395-a6c62b77680a no longer exists
+[AfterEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:27:05.647: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-6212" for this suite.
+Jun 20 10:27:11.670: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:27:11.736: INFO: namespace secrets-6212 deletion completed in 6.083260689s
+
+• [SLOW TEST:10.179 seconds]
+[sig-api-machinery] Secrets
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:31
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:27:11.736: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:164
+[It] should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating pod
+Jun 20 10:27:15.787: INFO: Pod pod-hostip-9f821481-dc64-4228-a04c-641841d27201 has hostIP: 10.100.10.111
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:27:15.787: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-5964" for this suite.
+Jun 20 10:27:37.803: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:27:37.870: INFO: namespace pods-5964 deletion completed in 22.080434279s
+
+• [SLOW TEST:26.134 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:27:37.871: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating projection with secret that has name projected-secret-test-map-be7a07a6-aa24-46bd-93ad-a7ed2407b3a8
+STEP: Creating a pod to test consume secrets
+Jun 20 10:27:37.931: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-27032484-7868-4e52-8f18-6b69be34bbc2" in namespace "projected-9522" to be "success or failure"
+Jun 20 10:27:37.933: INFO: Pod "pod-projected-secrets-27032484-7868-4e52-8f18-6b69be34bbc2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.746613ms
+Jun 20 10:27:39.937: INFO: Pod "pod-projected-secrets-27032484-7868-4e52-8f18-6b69be34bbc2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006345428s
+Jun 20 10:27:41.940: INFO: Pod "pod-projected-secrets-27032484-7868-4e52-8f18-6b69be34bbc2": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009510909s
+STEP: Saw pod success
+Jun 20 10:27:41.940: INFO: Pod "pod-projected-secrets-27032484-7868-4e52-8f18-6b69be34bbc2" satisfied condition "success or failure"
+Jun 20 10:27:41.943: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-secrets-27032484-7868-4e52-8f18-6b69be34bbc2 container projected-secret-volume-test: 
+STEP: delete the pod
+Jun 20 10:27:41.959: INFO: Waiting for pod pod-projected-secrets-27032484-7868-4e52-8f18-6b69be34bbc2 to disappear
+Jun 20 10:27:41.962: INFO: Pod pod-projected-secrets-27032484-7868-4e52-8f18-6b69be34bbc2 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:27:41.962: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-9522" for this suite.
+Jun 20 10:27:47.974: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:27:48.039: INFO: namespace projected-9522 deletion completed in 6.073951016s
+
+• [SLOW TEST:10.168 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  pod should support shared volumes between containers [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:27:48.039: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] pod should support shared volumes between containers [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating Pod
+STEP: Waiting for the pod to be running
+STEP: Getting the pod
+STEP: Reading file content from the nginx-container
+Jun 20 10:27:52.106: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec pod-sharedvolume-96815d29-aba1-4ec0-a820-85abadc627f0 -c busybox-main-container --namespace=emptydir-7609 -- cat /usr/share/volumeshare/shareddata.txt'
+Jun 20 10:27:52.514: INFO: stderr: ""
+Jun 20 10:27:52.514: INFO: stdout: "Hello from the busy-box sub-container\n"
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:27:52.514: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-7609" for this suite.
+Jun 20 10:27:58.529: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:27:58.601: INFO: namespace emptydir-7609 deletion completed in 6.082934753s
+
+• [SLOW TEST:10.563 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  pod should support shared volumes between containers [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
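+
+(For reference: a sketch of a two-container pod sharing an emptyDir, as this spec exercises. The main container name, mount path, and file contents are taken from the log; the image, commands, and sub-container name are assumptions.)
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: pod-sharedvolume-example           # hypothetical name
+  spec:
+    containers:
+    - name: busybox-main-container           # the container the log reads the file from
+      image: busybox                         # assumed
+      command: ["sleep", "3600"]             # assumed; stays up so the file can be read
+      volumeMounts:
+      - name: volumeshare
+        mountPath: /usr/share/volumeshare
+    - name: busybox-sub-container            # assumed name; writes the shared file
+      image: busybox
+      command: ["sh", "-c", "echo 'Hello from the busy-box sub-container' > /usr/share/volumeshare/shareddata.txt"]
+      volumeMounts:
+      - name: volumeshare
+        mountPath: /usr/share/volumeshare
+    volumes:
+    - name: volumeshare
+      emptyDir: {}                           # both containers see the same backing directory
+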
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:27:58.601: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:27:58.640: INFO: Waiting up to 5m0s for pod "downwardapi-volume-d0785176-8d54-4695-a772-8e218e28eea7" in namespace "projected-1991" to be "success or failure"
+Jun 20 10:27:58.643: INFO: Pod "downwardapi-volume-d0785176-8d54-4695-a772-8e218e28eea7": Phase="Pending", Reason="", readiness=false. Elapsed: 3.172096ms
+Jun 20 10:28:00.646: INFO: Pod "downwardapi-volume-d0785176-8d54-4695-a772-8e218e28eea7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006484754s
+STEP: Saw pod success
+Jun 20 10:28:00.646: INFO: Pod "downwardapi-volume-d0785176-8d54-4695-a772-8e218e28eea7" satisfied condition "success or failure"
+Jun 20 10:28:00.649: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-d0785176-8d54-4695-a772-8e218e28eea7 container client-container: <nil>
+STEP: delete the pod
+Jun 20 10:28:00.670: INFO: Waiting for pod downwardapi-volume-d0785176-8d54-4695-a772-8e218e28eea7 to disappear
+Jun 20 10:28:00.673: INFO: Pod downwardapi-volume-d0785176-8d54-4695-a772-8e218e28eea7 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:28:00.673: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-1991" for this suite.
+Jun 20 10:28:06.687: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:28:06.763: INFO: namespace projected-1991 deletion completed in 6.085362663s
+
+• [SLOW TEST:8.161 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
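+
+(For reference: a sketch of a pod projecting the downward API with a per-item file mode, which is what this spec verifies. The container name matches the log; everything else is an illustrative assumption.)
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: downwardapi-volume-example         # hypothetical name
+  spec:
+    restartPolicy: Never
+    containers:
+    - name: client-container
+      image: busybox                         # assumed
+      command: ["sh", "-c", "ls -l /etc/podinfo/podname && cat /etc/podinfo/podname"]
+      volumeMounts:
+      - name: podinfo
+        mountPath: /etc/podinfo
+    volumes:
+    - name: podinfo
+      projected:
+        sources:
+        - downwardAPI:
+            items:
+            - path: podname
+              mode: 0400                     # the per-item mode under test (assumed value)
+              fieldRef:
+                fieldPath: metadata.name
+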
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl cluster-info 
+  should check if Kubernetes master services is included in cluster-info  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:28:06.763: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[It] should check if Kubernetes master services is included in cluster-info  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: validating cluster-info
+Jun 20 10:28:06.798: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 cluster-info'
+Jun 20 10:28:06.865: INFO: stderr: ""
+Jun 20 10:28:06.865: INFO: stdout: "\x1b[0;32mKubernetes master\x1b[0m is running at \x1b[0;33mhttps://10.96.0.1:443\x1b[0m\n\x1b[0;32mKubeDNS\x1b[0m is running at \x1b[0;33mhttps://10.96.0.1:443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:28:06.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-6353" for this suite.
+Jun 20 10:28:12.881: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:28:12.949: INFO: namespace kubectl-6353 deletion completed in 6.080885213s
+
+• [SLOW TEST:6.186 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl cluster-info
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should check if Kubernetes master services is included in cluster-info  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:28:12.950: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Performing setup for networking test in namespace pod-network-test-5849
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun 20 10:28:12.982: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun 20 10:28:35.052: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.38.0.3:8080/dial?request=hostName&protocol=http&host=10.34.0.2&port=8080&tries=1'] Namespace:pod-network-test-5849 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:28:35.052: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:28:35.141: INFO: Waiting for endpoints: map[]
+Jun 20 10:28:35.144: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.38.0.3:8080/dial?request=hostName&protocol=http&host=10.38.0.2&port=8080&tries=1'] Namespace:pod-network-test-5849 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:28:35.144: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:28:35.236: INFO: Waiting for endpoints: map[]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:28:35.236: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-5849" for this suite.
+Jun 20 10:28:57.251: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:28:57.323: INFO: namespace pod-network-test-5849 deletion completed in 22.083145924s
+
+• [SLOW TEST:44.373 seconds]
+[sig-network] Networking
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl describe 
+  should check if kubectl describe prints relevant information for rc and pods  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:28:57.323: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[It] should check if kubectl describe prints relevant information for rc and pods  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 10:28:57.356: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-6352'
+Jun 20 10:28:57.560: INFO: stderr: ""
+Jun 20 10:28:57.560: INFO: stdout: "replicationcontroller/redis-master created\n"
+Jun 20 10:28:57.560: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-6352'
+Jun 20 10:28:57.768: INFO: stderr: ""
+Jun 20 10:28:57.768: INFO: stdout: "service/redis-master created\n"
+STEP: Waiting for Redis master to start.
+Jun 20 10:28:58.773: INFO: Selector matched 1 pods for map[app:redis]
+Jun 20 10:28:58.773: INFO: Found 0 / 1
+Jun 20 10:28:59.772: INFO: Selector matched 1 pods for map[app:redis]
+Jun 20 10:28:59.772: INFO: Found 1 / 1
+Jun 20 10:28:59.772: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+Jun 20 10:28:59.774: INFO: Selector matched 1 pods for map[app:redis]
+Jun 20 10:28:59.774: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+Jun 20 10:28:59.774: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 describe pod redis-master-4fq29 --namespace=kubectl-6352'
+Jun 20 10:28:59.853: INFO: stderr: ""
+Jun 20 10:28:59.853: INFO: stdout: "Name:           redis-master-4fq29\nNamespace:      kubectl-6352\nPriority:       0\nNode:           ip-10-100-10-111.eu-west-1.compute.internal/10.100.10.111\nStart Time:     Thu, 20 Jun 2019 10:28:57 +0000\nLabels:         app=redis\n                role=master\nAnnotations:    <none>\nStatus:         Running\nIP:             10.38.0.2\nControlled By:  ReplicationController/redis-master\nContainers:\n  redis-master:\n    Container ID:   docker://8172639b40395703432fd5650b8e0e02ed915cb55e0804f4697e7721eb333cae\n    Image:          gcr.io/kubernetes-e2e-test-images/redis:1.0\n    Image ID:       docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830\n    Port:           6379/TCP\n    Host Port:      0/TCP\n    State:          Running\n      Started:      Thu, 20 Jun 2019 10:28:58 +0000\n    Ready:          True\n    Restart Count:  0\n    Environment:    <none>\n    Mounts:\n      /var/run/secrets/kubernetes.io/serviceaccount from default-token-mwst5 (ro)\nConditions:\n  Type              Status\n  Initialized       True \n  Ready             True \n  ContainersReady   True \n  PodScheduled      True \nVolumes:\n  default-token-mwst5:\n    Type:        Secret (a volume populated by a Secret)\n    SecretName:  default-token-mwst5\n    Optional:    false\nQoS Class:       BestEffort\nNode-Selectors:  <none>\nTolerations:     node.kubernetes.io/not-ready:NoExecute for 300s\n                 node.kubernetes.io/unreachable:NoExecute for 300s\nEvents:\n  Type    Reason     Age   From                                                  Message\n  ----    ------     ----  ----                                                  -------\n  Normal  Scheduled  2s    default-scheduler                                     Successfully assigned kubectl-6352/redis-master-4fq29 to ip-10-100-10-111.eu-west-1.compute.internal\n  Normal  Pulled     1s    kubelet, ip-10-100-10-111.eu-west-1.compute.internal  Container image \"gcr.io/kubernetes-e2e-test-images/redis:1.0\" already present on machine\n  Normal  Created    1s    kubelet, ip-10-100-10-111.eu-west-1.compute.internal  Created container redis-master\n  Normal  Started    1s    kubelet, ip-10-100-10-111.eu-west-1.compute.internal  Started container redis-master\n"
+Jun 20 10:28:59.853: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 describe rc redis-master --namespace=kubectl-6352'
+Jun 20 10:28:59.937: INFO: stderr: ""
+Jun 20 10:28:59.937: INFO: stdout: "Name:         redis-master\nNamespace:    kubectl-6352\nSelector:     app=redis,role=master\nLabels:       app=redis\n              role=master\nAnnotations:  <none>\nReplicas:     1 current / 1 desired\nPods Status:  1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n  Labels:  app=redis\n           role=master\n  Containers:\n   redis-master:\n    Image:        gcr.io/kubernetes-e2e-test-images/redis:1.0\n    Port:         6379/TCP\n    Host Port:    0/TCP\n    Environment:  <none>\n    Mounts:       <none>\n  Volumes:        <none>\nEvents:\n  Type    Reason            Age   From                    Message\n  ----    ------            ----  ----                    -------\n  Normal  SuccessfulCreate  2s    replication-controller  Created pod: redis-master-4fq29\n"
+Jun 20 10:28:59.937: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 describe service redis-master --namespace=kubectl-6352'
+Jun 20 10:29:00.020: INFO: stderr: ""
+Jun 20 10:29:00.020: INFO: stdout: "Name:              redis-master\nNamespace:         kubectl-6352\nLabels:            app=redis\n                   role=master\nAnnotations:       <none>\nSelector:          app=redis,role=master\nType:              ClusterIP\nIP:                10.96.217.118\nPort:              <unset>  6379/TCP\nTargetPort:        redis-server/TCP\nEndpoints:         10.38.0.2:6379\nSession Affinity:  None\nEvents:            <none>\n"
+Jun 20 10:29:00.025: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 describe node ip-10-100-10-111.eu-west-1.compute.internal'
+Jun 20 10:29:00.173: INFO: stderr: ""
+Jun 20 10:29:00.173: INFO: stdout: "Name:               ip-10-100-10-111.eu-west-1.compute.internal\nRoles:              <none>\nLabels:             beta.kubernetes.io/arch=amd64\n                    beta.kubernetes.io/instance-type=t3.medium\n                    beta.kubernetes.io/os=linux\n                    failure-domain.beta.kubernetes.io/region=eu-west-1\n                    failure-domain.beta.kubernetes.io/zone=eu-west-1a\n                    kubernetes.io/arch=amd64\n                    kubernetes.io/hostname=ip-10-100-10-111.eu-west-1.compute.internal\n                    kubernetes.io/os=linux\n                    node-kind.sighup.io/gated=\n                    node-kind.sighup.io/infra=\n                    node-kind.sighup.io/nodes=\nAnnotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock\n                    node.alpha.kubernetes.io/ttl: 0\n                    volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp:  Thu, 20 Jun 2019 09:12:47 +0000\nTaints:             <none>\nUnschedulable:      false\nConditions:\n  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message\n  ----                 ------  -----------------                 ------------------                ------                       -------\n  NetworkUnavailable   False   Thu, 20 Jun 2019 09:35:40 +0000   Thu, 20 Jun 2019 09:35:40 +0000   WeaveIsUp                    Weave pod has set this\n  MemoryPressure       False   Thu, 20 Jun 2019 10:28:19 +0000   Thu, 20 Jun 2019 09:12:47 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available\n  DiskPressure         False   Thu, 20 Jun 2019 10:28:19 +0000   Thu, 20 Jun 2019 09:12:47 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure\n  PIDPressure          False   Thu, 20 Jun 2019 10:28:19 +0000   Thu, 20 Jun 2019 09:12:47 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available\n  Ready                True    Thu, 20 Jun 2019 10:28:19 +0000   Thu, 20 Jun 2019 09:35:47 +0000   KubeletReady                 kubelet is posting ready status. AppArmor enabled\nAddresses:\n  InternalIP:   10.100.10.111\n  InternalDNS:  ip-10-100-10-111.eu-west-1.compute.internal\n  Hostname:     ip-10-100-10-111.eu-west-1.compute.internal\nCapacity:\n attachable-volumes-aws-ebs:  25\n cpu:                         2\n ephemeral-storage:           81253764Ki\n hugepages-1Gi:               0\n hugepages-2Mi:               0\n memory:                      3978608Ki\n pods:                        110\nAllocatable:\n attachable-volumes-aws-ebs:  25\n cpu:                         2\n ephemeral-storage:           74883468779\n hugepages-1Gi:               0\n hugepages-2Mi:               0\n memory:                      3876208Ki\n pods:                        110\nSystem Info:\n Machine ID:                 ec206169f70fb7fffc41f7f3db19ef65\n System UUID:                EC206169-F70F-B7FF-FC41-F7F3DB19EF65\n Boot ID:                    ae5704a8-7ca3-44b8-8213-90edf1d509e8\n Kernel Version:             4.15.0-1032-aws\n OS Image:                   Ubuntu 18.04.2 LTS\n Operating System:           linux\n Architecture:               amd64\n Container Runtime Version:  docker://18.6.2\n Kubelet Version:            v1.15.0\n Kube-Proxy Version:         v1.15.0\nPodCIDR:                     10.32.3.0/24\nProviderID:                  aws:///eu-west-1a/i-0237e018b3fe38668\nNon-terminated Pods:         (5 in total)\n  Namespace                  Name                                                       CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE\n  ---------                  ----                                                       ------------  ----------  ---------------  -------------  ---\n  heptio-sonobuoy            sonobuoy                                                   0 (0%)        0 (0%)      0 (0%)           0 (0%)         29m\n  heptio-sonobuoy            sonobuoy-systemd-logs-daemon-set-de69c0149d564b9d-fz4v7    0 (0%)        0 (0%)      0 (0%)           0 (0%)         29m\n  kube-system                kube-proxy-9j68g                                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         76m\n  kube-system                weave-net-nh2zg                                            20m (1%)      0 (0%)      0 (0%)           0 (0%)         53m\n  kubectl-6352               redis-master-4fq29                                         0 (0%)        0 (0%)      0 (0%)           0 (0%)         3s\nAllocated resources:\n  (Total limits may be over 100 percent, i.e., overcommitted.)\n  Resource                    Requests  Limits\n  --------                    --------  ------\n  cpu                         20m (1%)  0 (0%)\n  memory                      0 (0%)    0 (0%)\n  ephemeral-storage           0 (0%)    0 (0%)\n  attachable-volumes-aws-ebs  0         0\nEvents:\n  Type    Reason     Age   From                                                  Message\n  ----    ------     ----  ----                                                  -------\n  Normal  NodeReady  53m   kubelet, ip-10-100-10-111.eu-west-1.compute.internal  Node ip-10-100-10-111.eu-west-1.compute.internal status is now: NodeReady\n"
+Jun 20 10:29:00.173: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 describe namespace kubectl-6352'
+Jun 20 10:29:00.294: INFO: stderr: ""
+Jun 20 10:29:00.294: INFO: stdout: "Name:         kubectl-6352\nLabels:       e2e-framework=kubectl\n              e2e-run=731aade8-62cc-405e-9385-a3927c938cdd\nAnnotations:  <none>\nStatus:       Active\n\nNo resource quota.\n\nNo resource limits.\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:29:00.294: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-6352" for this suite.
+Jun 20 10:29:22.307: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:29:22.394: INFO: namespace kubectl-6352 deletion completed in 22.096717914s
+
+• [SLOW TEST:25.071 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl describe
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should check if kubectl describe prints relevant information for rc and pods  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
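+
+(For reference: the replication controller the test pipes to `kubectl create -f -` can be reconstructed from the describe output above; a minimal equivalent follows. All fields shown are taken from the log.)
+
+  apiVersion: v1
+  kind: ReplicationController
+  metadata:
+    name: redis-master
+    labels:
+      app: redis
+      role: master
+  spec:
+    replicas: 1
+    selector:
+      app: redis
+      role: master
+    template:
+      metadata:
+        labels:
+          app: redis
+          role: master
+      spec:
+        containers:
+        - name: redis-master
+          image: gcr.io/kubernetes-e2e-test-images/redis:1.0
+          ports:
+          - containerPort: 6379
+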
+------------------------------
+SSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:29:22.395: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir 0644 on tmpfs
+Jun 20 10:29:22.433: INFO: Waiting up to 5m0s for pod "pod-b4b6cfeb-ee6f-4eaf-8c5d-1c4569d4e974" in namespace "emptydir-8793" to be "success or failure"
+Jun 20 10:29:22.436: INFO: Pod "pod-b4b6cfeb-ee6f-4eaf-8c5d-1c4569d4e974": Phase="Pending", Reason="", readiness=false. Elapsed: 2.882536ms
+Jun 20 10:29:24.440: INFO: Pod "pod-b4b6cfeb-ee6f-4eaf-8c5d-1c4569d4e974": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006111326s
+Jun 20 10:29:26.443: INFO: Pod "pod-b4b6cfeb-ee6f-4eaf-8c5d-1c4569d4e974": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009503955s
+STEP: Saw pod success
+Jun 20 10:29:26.443: INFO: Pod "pod-b4b6cfeb-ee6f-4eaf-8c5d-1c4569d4e974" satisfied condition "success or failure"
+Jun 20 10:29:26.446: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-b4b6cfeb-ee6f-4eaf-8c5d-1c4569d4e974 container test-container: <nil>
+STEP: delete the pod
+Jun 20 10:29:26.463: INFO: Waiting for pod pod-b4b6cfeb-ee6f-4eaf-8c5d-1c4569d4e974 to disappear
+Jun 20 10:29:26.465: INFO: Pod pod-b4b6cfeb-ee6f-4eaf-8c5d-1c4569d4e974 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:29:26.465: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-8793" for this suite.
+Jun 20 10:29:32.478: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:29:32.542: INFO: namespace emptydir-8793 deletion completed in 6.073969079s
+
+• [SLOW TEST:10.147 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
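+
+(For reference: the "tmpfs" emptyDir variant this spec exercises backs the volume with memory via `medium: Memory`; the test then creates a root-owned file with mode 0644 inside it and verifies the result. The container name matches the log; the rest is an illustrative assumption.)
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: pod-emptydir-tmpfs-example         # hypothetical name
+  spec:
+    restartPolicy: Never
+    containers:
+    - name: test-container
+      image: busybox                         # assumed; the e2e suite uses its own test image
+      command: ["sh", "-c", "mount | grep /test-volume && ls -l /test-volume"]   # assumed probe
+      volumeMounts:
+      - name: test-volume
+        mountPath: /test-volume
+    volumes:
+    - name: test-volume
+      emptyDir:
+        medium: Memory                       # RAM-backed (tmpfs) instead of node disk
+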
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:29:32.542: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename containers
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test use defaults
+Jun 20 10:29:32.581: INFO: Waiting up to 5m0s for pod "client-containers-033e6d59-6bec-445c-9ae3-2ca14b9e0fd5" in namespace "containers-2497" to be "success or failure"
+Jun 20 10:29:32.583: INFO: Pod "client-containers-033e6d59-6bec-445c-9ae3-2ca14b9e0fd5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.197103ms
+Jun 20 10:29:34.586: INFO: Pod "client-containers-033e6d59-6bec-445c-9ae3-2ca14b9e0fd5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.005611864s
+Jun 20 10:29:36.590: INFO: Pod "client-containers-033e6d59-6bec-445c-9ae3-2ca14b9e0fd5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.008757997s
+STEP: Saw pod success
+Jun 20 10:29:36.590: INFO: Pod "client-containers-033e6d59-6bec-445c-9ae3-2ca14b9e0fd5" satisfied condition "success or failure"
+Jun 20 10:29:36.592: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod client-containers-033e6d59-6bec-445c-9ae3-2ca14b9e0fd5 container test-container: <nil>
+STEP: delete the pod
+Jun 20 10:29:36.608: INFO: Waiting for pod client-containers-033e6d59-6bec-445c-9ae3-2ca14b9e0fd5 to disappear
+Jun 20 10:29:36.610: INFO: Pod client-containers-033e6d59-6bec-445c-9ae3-2ca14b9e0fd5 no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:29:36.610: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "containers-2497" for this suite.
+Jun 20 10:29:42.621: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:29:42.686: INFO: namespace containers-2497 deletion completed in 6.073010721s
+
+• [SLOW TEST:10.144 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
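+
+(For reference: "image defaults" means the container spec sets neither `command` nor `args`, so the image's own ENTRYPOINT and CMD run unmodified; a sketch with assumed names and image:)
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: client-containers-example          # hypothetical name
+  spec:
+    restartPolicy: Never
+    containers:
+    - name: test-container
+      image: busybox                         # assumed; note no command/args fields at all
+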
+------------------------------
+S
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:29:42.686: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for all pods to be garbage collected
+STEP: Gathering metrics
+W0620 10:29:52.741737      15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun 20 10:29:52.741: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:29:52.741: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-2361" for this suite.
+Jun 20 10:29:58.756: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:29:58.828: INFO: namespace gc-2361 deletion completed in 6.083486483s
+
+• [SLOW TEST:16.142 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute prestop http hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:29:58.828: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:63
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute prestop http hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the pod with lifecycle hook
+STEP: delete the pod with lifecycle hook
+Jun 20 10:30:06.897: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+Jun 20 10:30:06.900: INFO: Pod pod-with-prestop-http-hook still exists
+Jun 20 10:30:08.901: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+Jun 20 10:30:08.904: INFO: Pod pod-with-prestop-http-hook still exists
+Jun 20 10:30:10.901: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+Jun 20 10:30:10.904: INFO: Pod pod-with-prestop-http-hook no longer exists
+STEP: check prestop hook
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:30:10.916: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-lifecycle-hook-4300" for this suite.
+Jun 20 10:30:32.931: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:30:33.005: INFO: namespace container-lifecycle-hook-4300 deletion completed in 22.085654658s
+
+• [SLOW TEST:34.177 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:42
+    should execute prestop http hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
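+
+(For reference: a sketch of the preStop httpGet hook this spec exercises. The pod name matches the log; the container name, image, target host, and path are assumptions. The kubelet calls the URL when the pod is deleted, before the container is stopped, which is what "check prestop hook" verifies.)
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: pod-with-prestop-http-hook
+  spec:
+    containers:
+    - name: pod-with-prestop-http-hook       # assumed container name
+      image: k8s.gcr.io/pause:3.1            # assumed image
+      lifecycle:
+        preStop:
+          httpGet:
+            path: /echo?msg=prestop          # assumed; the handler pod records the request
+            port: 8080
+            host: 10.38.0.3                  # assumed: pod IP of the hook-handler pod created earlier
+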
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl expose 
+  should create services for rc  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:30:33.006: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[It] should create services for rc  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating Redis RC
+Jun 20 10:30:33.042: INFO: namespace kubectl-3665
+Jun 20 10:30:33.042: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-3665'
+Jun 20 10:30:33.211: INFO: stderr: ""
+Jun 20 10:30:33.212: INFO: stdout: "replicationcontroller/redis-master created\n"
+STEP: Waiting for Redis master to start.
+Jun 20 10:30:34.219: INFO: Selector matched 1 pods for map[app:redis]
+Jun 20 10:30:34.219: INFO: Found 0 / 1
+Jun 20 10:30:35.215: INFO: Selector matched 1 pods for map[app:redis]
+Jun 20 10:30:35.215: INFO: Found 0 / 1
+Jun 20 10:30:36.215: INFO: Selector matched 1 pods for map[app:redis]
+Jun 20 10:30:36.215: INFO: Found 1 / 1
+Jun 20 10:30:36.215: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+Jun 20 10:30:36.217: INFO: Selector matched 1 pods for map[app:redis]
+Jun 20 10:30:36.217: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+Jun 20 10:30:36.217: INFO: wait on redis-master startup in kubectl-3665 
+Jun 20 10:30:36.218: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 logs redis-master-spz2g redis-master --namespace=kubectl-3665'
+Jun 20 10:30:36.304: INFO: stderr: ""
+Jun 20 10:30:36.304: INFO: stdout: "                _._                                                  \n           _.-``__ ''-._                                             \n      _.-``    `.  `_.  ''-._           Redis 3.2.12 (35a5711f/0) 64 bit\n  .-`` .-```.  ```\\/    _.,_ ''-._                                   \n (    '      ,       .-`  | `,    )     Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'|     Port: 6379\n |    `-._   `._    /     _.-'    |     PID: 1\n  `-._    `-._  `-./  _.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |           http://redis.io        \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |                                  \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n      `-._    `-.__.-'    _.-'                                       \n          `-._        _.-'                                           \n              `-.__.-'                                               \n\n1:M 20 Jun 10:30:35.346 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 20 Jun 10:30:35.346 # Server started, Redis version 3.2.12\n1:M 20 Jun 10:30:35.346 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 20 Jun 10:30:35.346 * The server is now ready to accept connections on port 6379\n"
+STEP: exposing RC
+Jun 20 10:30:36.304: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 expose rc redis-master --name=rm2 --port=1234 --target-port=6379 --namespace=kubectl-3665'
+Jun 20 10:30:36.392: INFO: stderr: ""
+Jun 20 10:30:36.392: INFO: stdout: "service/rm2 exposed\n"
+Jun 20 10:30:36.399: INFO: Service rm2 in namespace kubectl-3665 found.
+STEP: exposing service
+Jun 20 10:30:38.403: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 expose service rm2 --name=rm3 --port=2345 --target-port=6379 --namespace=kubectl-3665'
+Jun 20 10:30:38.480: INFO: stderr: ""
+Jun 20 10:30:38.480: INFO: stdout: "service/rm3 exposed\n"
+Jun 20 10:30:38.483: INFO: Service rm3 in namespace kubectl-3665 found.
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:30:40.489: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-3665" for this suite.
+Jun 20 10:31:02.502: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:31:02.575: INFO: namespace kubectl-3665 deletion completed in 22.083533387s
+
+• [SLOW TEST:29.570 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl expose
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should create services for rc  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
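+
+(For reference: `kubectl expose rc redis-master --name=rm2 --port=1234 --target-port=6379` is roughly equivalent to creating this Service; the names and ports are from the log, and the selector is copied from the rc by expose.)
+
+  apiVersion: v1
+  kind: Service
+  metadata:
+    name: rm2
+    namespace: kubectl-3665
+  spec:
+    selector:
+      app: redis
+      role: master
+    ports:
+    - port: 1234
+      targetPort: 6379
+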
+------------------------------
+SSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:31:02.575: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename init-container
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:44
+[It] should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating the pod
+Jun 20 10:31:02.610: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:31:06.674: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "init-container-1007" for this suite.
+Jun 20 10:31:28.692: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:31:28.761: INFO: namespace init-container-1007 deletion completed in 22.079954924s
+
+• [SLOW TEST:26.186 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
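+
+(For reference: a sketch of a RestartAlways pod with init containers. Init containers run sequentially to completion before the regular containers start, which is the ordering this spec asserts. All names, images, and commands are assumptions.)
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: pod-init-example                   # hypothetical name
+  spec:
+    restartPolicy: Always
+    initContainers:
+    - name: init1
+      image: busybox                         # assumed
+      command: ["true"]                      # must exit 0 before init2 starts
+    - name: init2
+      image: busybox
+      command: ["true"]
+    containers:
+    - name: run1
+      image: k8s.gcr.io/pause:3.1            # assumed; starts only after both inits succeed
+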
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:31:28.761: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for the rc to be deleted
+STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods
+STEP: Gathering metrics
+W0620 10:32:08.821189      15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun 20 10:32:08.821: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:32:08.821: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-2223" for this suite.
+Jun 20 10:32:14.834: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:32:14.901: INFO: namespace gc-2223 deletion completed in 6.077007017s
+
+• [SLOW TEST:46.140 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
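+
+(For reference: "delete options say so" refers to the propagation policy on the delete request for the rc. A sketch of the request body, assuming the meta/v1 DeleteOptions serialization:)
+
+  apiVersion: v1
+  kind: DeleteOptions
+  propagationPolicy: Orphan                  # leave the rc's pods running instead of cascading the delete
+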
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:32:14.902: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating projection with secret that has name projected-secret-test-40ec2816-bf9f-44a1-a7ea-15aa509e8cf1
+STEP: Creating a pod to test consume secrets
+Jun 20 10:32:14.947: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-27ce63b7-66b8-46c4-98d9-3c51786391d1" in namespace "projected-5722" to be "success or failure"
+Jun 20 10:32:14.951: INFO: Pod "pod-projected-secrets-27ce63b7-66b8-46c4-98d9-3c51786391d1": Phase="Pending", Reason="", readiness=false. Elapsed: 4.153926ms
+Jun 20 10:32:16.954: INFO: Pod "pod-projected-secrets-27ce63b7-66b8-46c4-98d9-3c51786391d1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007337182s
+Jun 20 10:32:18.958: INFO: Pod "pod-projected-secrets-27ce63b7-66b8-46c4-98d9-3c51786391d1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.010603153s
+STEP: Saw pod success
+Jun 20 10:32:18.958: INFO: Pod "pod-projected-secrets-27ce63b7-66b8-46c4-98d9-3c51786391d1" satisfied condition "success or failure"
+Jun 20 10:32:18.960: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-secrets-27ce63b7-66b8-46c4-98d9-3c51786391d1 container projected-secret-volume-test: <nil>
+STEP: delete the pod
+Jun 20 10:32:18.979: INFO: Waiting for pod pod-projected-secrets-27ce63b7-66b8-46c4-98d9-3c51786391d1 to disappear
+Jun 20 10:32:18.982: INFO: Pod pod-projected-secrets-27ce63b7-66b8-46c4-98d9-3c51786391d1 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:32:18.982: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-5722" for this suite.
+Jun 20 10:32:24.996: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:32:25.068: INFO: namespace projected-5722 deletion completed in 6.082418285s
+
+• [SLOW TEST:10.166 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
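+
+(For reference: a sketch of the non-root pod with defaultMode and fsGroup that this spec exercises. The secret name and container name are from the log; the uid, gid, mode, image, and command are illustrative assumptions.)
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: pod-projected-secrets-example      # hypothetical name
+  spec:
+    restartPolicy: Never
+    securityContext:
+      runAsUser: 1000                        # assumed non-root uid
+      fsGroup: 1001                          # assumed; volume files get this group ownership
+    containers:
+    - name: projected-secret-volume-test
+      image: busybox                         # assumed
+      command: ["sh", "-c", "ls -ln /etc/projected-secret-volume && cat /etc/projected-secret-volume/*"]
+      volumeMounts:
+      - name: projected-secret-volume
+        mountPath: /etc/projected-secret-volume
+    volumes:
+    - name: projected-secret-volume
+      projected:
+        defaultMode: 0440                    # assumed mode under test
+        sources:
+        - secret:
+            name: projected-secret-test-40ec2816-bf9f-44a1-a7ea-15aa509e8cf1
+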
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:32:25.068: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name projected-configmap-test-volume-map-743ce871-1a89-45dd-a5bb-d2d7427c20e0
+STEP: Creating a pod to test consume configMaps
+Jun 20 10:32:25.111: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-1a03550a-f53f-4f5a-b8b9-a9897a2669ca" in namespace "projected-9819" to be "success or failure"
+Jun 20 10:32:25.119: INFO: Pod "pod-projected-configmaps-1a03550a-f53f-4f5a-b8b9-a9897a2669ca": Phase="Pending", Reason="", readiness=false. Elapsed: 7.47808ms
+Jun 20 10:32:27.122: INFO: Pod "pod-projected-configmaps-1a03550a-f53f-4f5a-b8b9-a9897a2669ca": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.010986268s
+STEP: Saw pod success
+Jun 20 10:32:27.122: INFO: Pod "pod-projected-configmaps-1a03550a-f53f-4f5a-b8b9-a9897a2669ca" satisfied condition "success or failure"
+Jun 20 10:32:27.125: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-configmaps-1a03550a-f53f-4f5a-b8b9-a9897a2669ca container projected-configmap-volume-test: <nil>
+STEP: delete the pod
+Jun 20 10:32:27.152: INFO: Waiting for pod pod-projected-configmaps-1a03550a-f53f-4f5a-b8b9-a9897a2669ca to disappear
+Jun 20 10:32:27.155: INFO: Pod pod-projected-configmaps-1a03550a-f53f-4f5a-b8b9-a9897a2669ca no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:32:27.155: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-9819" for this suite.
+Jun 20 10:32:33.167: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:32:33.235: INFO: namespace projected-9819 deletion completed in 6.077007325s
+
+• [SLOW TEST:8.167 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
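+
+(For reference: the configMap counterpart of the projected-secret case above, with a key-to-path mapping and a per-item mode. The configMap and container names are from the log; key, path, mode, and image are assumptions.)
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: pod-projected-configmaps-example   # hypothetical name
+  spec:
+    restartPolicy: Never
+    containers:
+    - name: projected-configmap-volume-test
+      image: busybox                         # assumed
+      command: ["cat", "/etc/projected-configmap-volume/path/to/data-2"]
+      volumeMounts:
+      - name: projected-configmap-volume
+        mountPath: /etc/projected-configmap-volume
+    volumes:
+    - name: projected-configmap-volume
+      projected:
+        sources:
+        - configMap:
+            name: projected-configmap-test-volume-map-743ce871-1a89-45dd-a5bb-d2d7427c20e0
+            items:
+            - key: data-2                    # assumed key
+              path: path/to/data-2           # the mapping under test
+              mode: 0400                     # the item mode under test (assumed value)
+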
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Secrets 
+  should be consumable from pods in env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:32:33.235: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating secret with name secret-test-0d411aa9-4a44-4b2a-b4de-1b337cbdf34b
+STEP: Creating a pod to test consume secrets
+Jun 20 10:32:33.278: INFO: Waiting up to 5m0s for pod "pod-secrets-13e4eeb0-67ac-44a8-b141-e2a2069f306b" in namespace "secrets-1580" to be "success or failure"
+Jun 20 10:32:33.281: INFO: Pod "pod-secrets-13e4eeb0-67ac-44a8-b141-e2a2069f306b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.723401ms
+Jun 20 10:32:35.285: INFO: Pod "pod-secrets-13e4eeb0-67ac-44a8-b141-e2a2069f306b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006292253s
+STEP: Saw pod success
+Jun 20 10:32:35.285: INFO: Pod "pod-secrets-13e4eeb0-67ac-44a8-b141-e2a2069f306b" satisfied condition "success or failure"
+Jun 20 10:32:35.287: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-secrets-13e4eeb0-67ac-44a8-b141-e2a2069f306b container secret-env-test: 
+STEP: delete the pod
+Jun 20 10:32:35.304: INFO: Waiting for pod pod-secrets-13e4eeb0-67ac-44a8-b141-e2a2069f306b to disappear
+Jun 20 10:32:35.306: INFO: Pod pod-secrets-13e4eeb0-67ac-44a8-b141-e2a2069f306b no longer exists
+[AfterEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:32:35.306: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-1580" for this suite.
+Jun 20 10:32:41.318: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:32:41.386: INFO: namespace secrets-1580 deletion completed in 6.076820732s
+
+• [SLOW TEST:8.151 seconds]
+[sig-api-machinery] Secrets
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:31
+  should be consumable from pods in env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
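+
+The secret-env test above wires a Secret key into a container environment variable through secretKeyRef. A hedged Go sketch of the same pattern follows (pod, secret, and key names are illustrative assumptions; the program just serializes the spec):
+
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func main() {
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{Name: "secret-env-demo"}, // illustrative name
+        Spec: v1.PodSpec{
+            RestartPolicy: v1.RestartPolicyNever,
+            Containers: []v1.Container{{
+                Name:    "demo",
+                Image:   "busybox",
+                Command: []string{"sh", "-c", "echo $SECRET_DATA"},
+                Env: []v1.EnvVar{{
+                    Name: "SECRET_DATA",
+                    ValueFrom: &v1.EnvVarSource{
+                        // Pull the value of key "data-1" from Secret "demo-secret";
+                        // the kubelet resolves this at container start.
+                        SecretKeyRef: &v1.SecretKeySelector{
+                            LocalObjectReference: v1.LocalObjectReference{Name: "demo-secret"},
+                            Key:                  "data-1",
+                        },
+                    },
+                }},
+            }},
+        },
+    }
+    out, _ := json.MarshalIndent(pod, "", "  ")
+    fmt.Println(string(out))
+}
+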
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:32:41.386: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:32:41.430: INFO: Waiting up to 5m0s for pod "downwardapi-volume-923c2e14-63ea-4f77-952d-9da5ee067276" in namespace "projected-8965" to be "success or failure"
+Jun 20 10:32:41.433: INFO: Pod "downwardapi-volume-923c2e14-63ea-4f77-952d-9da5ee067276": Phase="Pending", Reason="", readiness=false. Elapsed: 3.114198ms
+Jun 20 10:32:43.436: INFO: Pod "downwardapi-volume-923c2e14-63ea-4f77-952d-9da5ee067276": Phase="Running", Reason="", readiness=true. Elapsed: 2.006406395s
+Jun 20 10:32:45.439: INFO: Pod "downwardapi-volume-923c2e14-63ea-4f77-952d-9da5ee067276": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009703083s
+STEP: Saw pod success
+Jun 20 10:32:45.439: INFO: Pod "downwardapi-volume-923c2e14-63ea-4f77-952d-9da5ee067276" satisfied condition "success or failure"
+Jun 20 10:32:45.442: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-923c2e14-63ea-4f77-952d-9da5ee067276 container client-container: 
+STEP: delete the pod
+Jun 20 10:32:45.459: INFO: Waiting for pod downwardapi-volume-923c2e14-63ea-4f77-952d-9da5ee067276 to disappear
+Jun 20 10:32:45.462: INFO: Pod downwardapi-volume-923c2e14-63ea-4f77-952d-9da5ee067276 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:32:45.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-8965" for this suite.
+Jun 20 10:32:51.476: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:32:51.539: INFO: namespace projected-8965 deletion completed in 6.074156091s
+
+• [SLOW TEST:10.153 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:32:51.539: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir 0777 on node default medium
+Jun 20 10:32:51.629: INFO: Waiting up to 5m0s for pod "pod-eb99fd52-27d9-4568-a740-eefaad0e9184" in namespace "emptydir-5068" to be "success or failure"
+Jun 20 10:32:51.632: INFO: Pod "pod-eb99fd52-27d9-4568-a740-eefaad0e9184": Phase="Pending", Reason="", readiness=false. Elapsed: 3.07074ms
+Jun 20 10:32:53.636: INFO: Pod "pod-eb99fd52-27d9-4568-a740-eefaad0e9184": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006712184s
+Jun 20 10:32:55.639: INFO: Pod "pod-eb99fd52-27d9-4568-a740-eefaad0e9184": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.00996538s
+STEP: Saw pod success
+Jun 20 10:32:55.639: INFO: Pod "pod-eb99fd52-27d9-4568-a740-eefaad0e9184" satisfied condition "success or failure"
+Jun 20 10:32:55.641: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-eb99fd52-27d9-4568-a740-eefaad0e9184 container test-container: 
+STEP: delete the pod
+Jun 20 10:32:55.658: INFO: Waiting for pod pod-eb99fd52-27d9-4568-a740-eefaad0e9184 to disappear
+Jun 20 10:32:55.661: INFO: Pod pod-eb99fd52-27d9-4568-a740-eefaad0e9184 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:32:55.661: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-5068" for this suite.
+Jun 20 10:33:01.673: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:33:01.749: INFO: namespace emptydir-5068 deletion completed in 6.085778806s
+
+• [SLOW TEST:10.210 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
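+
+The emptyDir variants in this suite — (non-root,0644,default), (non-root,0777,default), (root,0777,tmpfs), and so on — differ only in the user the container runs as, the file mode under test, and the volume medium. A minimal Go sketch of the (non-root,0777,default) shape follows, with illustrative names and a busybox command standing in for the framework's mounttest image:
+
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func int64Ptr(i int64) *int64 { return &i }
+
+func main() {
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{Name: "emptydir-demo"}, // illustrative name
+        Spec: v1.PodSpec{
+            RestartPolicy: v1.RestartPolicyNever,
+            // "non-root" variant: run as an unprivileged UID.
+            SecurityContext: &v1.PodSecurityContext{RunAsUser: int64Ptr(1001)},
+            Volumes: []v1.Volume{{
+                Name: "scratch",
+                VolumeSource: v1.VolumeSource{
+                    // StorageMediumDefault = node disk; StorageMediumMemory = tmpfs,
+                    // matching the (...,default) and (...,tmpfs) variants.
+                    EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumDefault},
+                },
+            }},
+            Containers: []v1.Container{{
+                Name:  "demo",
+                Image: "busybox",
+                // Create a file, apply the mode under test, and print it back.
+                Command:      []string{"sh", "-c", "touch /scratch/f && chmod 0777 /scratch/f && stat -c %a /scratch/f"},
+                VolumeMounts: []v1.VolumeMount{{Name: "scratch", MountPath: "/scratch"}},
+            }},
+        },
+    }
+    out, _ := json.MarshalIndent(pod, "", "  ")
+    fmt.Println(string(out))
+}
+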
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:33:01.750: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:33:01.791: INFO: Waiting up to 5m0s for pod "downwardapi-volume-37291a53-d2fe-47b6-b37e-8f7d5732fc7a" in namespace "projected-8360" to be "success or failure"
+Jun 20 10:33:01.797: INFO: Pod "downwardapi-volume-37291a53-d2fe-47b6-b37e-8f7d5732fc7a": Phase="Pending", Reason="", readiness=false. Elapsed: 5.550037ms
+Jun 20 10:33:03.800: INFO: Pod "downwardapi-volume-37291a53-d2fe-47b6-b37e-8f7d5732fc7a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.0087178s
+Jun 20 10:33:05.804: INFO: Pod "downwardapi-volume-37291a53-d2fe-47b6-b37e-8f7d5732fc7a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01223665s
+STEP: Saw pod success
+Jun 20 10:33:05.804: INFO: Pod "downwardapi-volume-37291a53-d2fe-47b6-b37e-8f7d5732fc7a" satisfied condition "success or failure"
+Jun 20 10:33:05.806: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-37291a53-d2fe-47b6-b37e-8f7d5732fc7a container client-container: 
+STEP: delete the pod
+Jun 20 10:33:05.825: INFO: Waiting for pod downwardapi-volume-37291a53-d2fe-47b6-b37e-8f7d5732fc7a to disappear
+Jun 20 10:33:05.829: INFO: Pod downwardapi-volume-37291a53-d2fe-47b6-b37e-8f7d5732fc7a no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:33:05.829: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-8360" for this suite.
+Jun 20 10:33:11.849: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:33:11.915: INFO: namespace projected-8360 deletion completed in 6.077828805s
+
+• [SLOW TEST:10.166 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:33:11.916: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:33:11.956: INFO: Waiting up to 5m0s for pod "downwardapi-volume-8c605635-6961-4735-a718-7de1218616d5" in namespace "downward-api-7886" to be "success or failure"
+Jun 20 10:33:11.962: INFO: Pod "downwardapi-volume-8c605635-6961-4735-a718-7de1218616d5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.003276ms
+Jun 20 10:33:13.965: INFO: Pod "downwardapi-volume-8c605635-6961-4735-a718-7de1218616d5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009359822s
+STEP: Saw pod success
+Jun 20 10:33:13.965: INFO: Pod "downwardapi-volume-8c605635-6961-4735-a718-7de1218616d5" satisfied condition "success or failure"
+Jun 20 10:33:13.968: INFO: Trying to get logs from node ip-10-100-12-226.eu-west-1.compute.internal pod downwardapi-volume-8c605635-6961-4735-a718-7de1218616d5 container client-container: 
+STEP: delete the pod
+Jun 20 10:33:13.990: INFO: Waiting for pod downwardapi-volume-8c605635-6961-4735-a718-7de1218616d5 to disappear
+Jun 20 10:33:13.992: INFO: Pod downwardapi-volume-8c605635-6961-4735-a718-7de1218616d5 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:33:13.992: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-7886" for this suite.
+Jun 20 10:33:20.011: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:33:20.082: INFO: namespace downward-api-7886 deletion completed in 6.087274065s
+
+• [SLOW TEST:8.167 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
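+
+The downward API volume tests above ("podname only", "container's cpu request/limit", "memory request") all hinge on two selector types: fieldRef for pod metadata and resourceFieldRef for container resources. A combined Go sketch follows (names, image, and resource values are illustrative assumptions):
+
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+
+    v1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func main() {
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{Name: "downwardapi-demo"}, // illustrative name
+        Spec: v1.PodSpec{
+            RestartPolicy: v1.RestartPolicyNever,
+            Volumes: []v1.Volume{{
+                Name: "podinfo",
+                VolumeSource: v1.VolumeSource{
+                    DownwardAPI: &v1.DownwardAPIVolumeSource{
+                        Items: []v1.DownwardAPIVolumeFile{
+                            // fieldRef exposes pod metadata (the "podname only" case) ...
+                            {Path: "podname", FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"}},
+                            // ... resourceFieldRef exposes container resources (cpu limit case).
+                            {Path: "cpu_limit", ResourceFieldRef: &v1.ResourceFieldSelector{
+                                ContainerName: "client-container", Resource: "limits.cpu"}},
+                        },
+                    },
+                },
+            }},
+            Containers: []v1.Container{{
+                Name:    "client-container",
+                Image:   "busybox",
+                Command: []string{"sh", "-c", "cat /etc/podinfo/podname /etc/podinfo/cpu_limit"},
+                Resources: v1.ResourceRequirements{
+                    Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")},
+                },
+                VolumeMounts: []v1.VolumeMount{{Name: "podinfo", MountPath: "/etc/podinfo"}},
+            }},
+        },
+    }
+    out, _ := json.MarshalIndent(pod, "", "  ")
+    fmt.Println(string(out))
+}
+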
+[k8s.io] Container Runtime blackbox test on terminated container 
+  should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:33:20.082: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-runtime
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the container
+STEP: wait for the container to reach Succeeded
+STEP: get the container status
+STEP: the container should be terminated
+STEP: the termination message should be set
+Jun 20 10:33:23.135: INFO: Expected: &{OK} to match Container's Termination Message: OK --
+STEP: delete the container
+[AfterEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:33:23.148: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-runtime-7783" for this suite.
+Jun 20 10:33:29.161: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:33:29.233: INFO: namespace container-runtime-7783 deletion completed in 6.082543908s
+
+• [SLOW TEST:9.151 seconds]
+[k8s.io] Container Runtime
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  blackbox test
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:38
+    on terminated container
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:129
+      should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
+      /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
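+
+The termination-message test above ("Expected: &{OK} to match ...") relies on the container writing its message to the termination-log file and on the FallbackToLogsOnError policy, which uses that file when present and otherwise falls back to the tail of the container log on failure. A hedged Go sketch of such a pod (illustrative names and command):
+
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func main() {
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{Name: "termination-message-demo"}, // illustrative name
+        Spec: v1.PodSpec{
+            RestartPolicy: v1.RestartPolicyNever,
+            Containers: []v1.Container{{
+                Name:  "demo",
+                Image: "busybox",
+                // Write the message to the termination-log file, then exit 0.
+                Command:                []string{"sh", "-c", "printf OK > /dev/termination-log"},
+                TerminationMessagePath: "/dev/termination-log",
+                // The policy this test sets: prefer the file, fall back to logs on error.
+                TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
+            }},
+        },
+    }
+    out, _ := json.MarshalIndent(pod, "", "  ")
+    fmt.Println(string(out))
+}
+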
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:33:29.234: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:33:29.275: INFO: Waiting up to 5m0s for pod "downwardapi-volume-e6750aff-074d-44a0-916f-31459d969dfa" in namespace "downward-api-7493" to be "success or failure"
+Jun 20 10:33:29.279: INFO: Pod "downwardapi-volume-e6750aff-074d-44a0-916f-31459d969dfa": Phase="Pending", Reason="", readiness=false. Elapsed: 3.654087ms
+Jun 20 10:33:31.282: INFO: Pod "downwardapi-volume-e6750aff-074d-44a0-916f-31459d969dfa": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006924862s
+STEP: Saw pod success
+Jun 20 10:33:31.282: INFO: Pod "downwardapi-volume-e6750aff-074d-44a0-916f-31459d969dfa" satisfied condition "success or failure"
+Jun 20 10:33:31.285: INFO: Trying to get logs from node ip-10-100-12-226.eu-west-1.compute.internal pod downwardapi-volume-e6750aff-074d-44a0-916f-31459d969dfa container client-container: 
+STEP: delete the pod
+Jun 20 10:33:31.302: INFO: Waiting for pod downwardapi-volume-e6750aff-074d-44a0-916f-31459d969dfa to disappear
+Jun 20 10:33:31.305: INFO: Pod downwardapi-volume-e6750aff-074d-44a0-916f-31459d969dfa no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:33:31.305: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-7493" for this suite.
+Jun 20 10:33:37.318: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:33:37.397: INFO: namespace downward-api-7493 deletion completed in 6.088615158s
+
+• [SLOW TEST:8.163 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:33:37.397: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir 0777 on tmpfs
+Jun 20 10:33:37.436: INFO: Waiting up to 5m0s for pod "pod-f395f75f-6bbd-42dc-bb3f-9dd5503ea267" in namespace "emptydir-2063" to be "success or failure"
+Jun 20 10:33:37.440: INFO: Pod "pod-f395f75f-6bbd-42dc-bb3f-9dd5503ea267": Phase="Pending", Reason="", readiness=false. Elapsed: 3.162948ms
+Jun 20 10:33:39.446: INFO: Pod "pod-f395f75f-6bbd-42dc-bb3f-9dd5503ea267": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009188577s
+Jun 20 10:33:41.449: INFO: Pod "pod-f395f75f-6bbd-42dc-bb3f-9dd5503ea267": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012284461s
+STEP: Saw pod success
+Jun 20 10:33:41.449: INFO: Pod "pod-f395f75f-6bbd-42dc-bb3f-9dd5503ea267" satisfied condition "success or failure"
+Jun 20 10:33:41.451: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-f395f75f-6bbd-42dc-bb3f-9dd5503ea267 container test-container: 
+STEP: delete the pod
+Jun 20 10:33:41.468: INFO: Waiting for pod pod-f395f75f-6bbd-42dc-bb3f-9dd5503ea267 to disappear
+Jun 20 10:33:41.470: INFO: Pod pod-f395f75f-6bbd-42dc-bb3f-9dd5503ea267 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:33:41.470: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-2063" for this suite.
+Jun 20 10:33:47.485: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:33:47.547: INFO: namespace emptydir-2063 deletion completed in 6.072064358s
+
+• [SLOW TEST:10.150 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:33:47.548: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating pod liveness-5f3ecc10-2d33-4589-8b8e-d75b5c81456d in namespace container-probe-9688
+Jun 20 10:33:51.595: INFO: Started pod liveness-5f3ecc10-2d33-4589-8b8e-d75b5c81456d in namespace container-probe-9688
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun 20 10:33:51.598: INFO: Initial restart count of pod liveness-5f3ecc10-2d33-4589-8b8e-d75b5c81456d is 0
+Jun 20 10:34:15.643: INFO: Restart count of pod container-probe-9688/liveness-5f3ecc10-2d33-4589-8b8e-d75b5c81456d is now 1 (24.044894857s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:34:15.655: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-9688" for this suite.
+Jun 20 10:34:21.670: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:34:21.740: INFO: namespace container-probe-9688 deletion completed in 6.081078552s
+
+• [SLOW TEST:34.192 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
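+
+The liveness test above creates a pod whose /healthz endpoint starts failing, then watches restartCount climb from 0 to 1. A minimal Go sketch of an HTTP liveness probe follows; the image, port, and thresholds are illustrative assumptions, and the Handler field name follows the 1.15-era core/v1 API used by this suite (newer client-go renames it to ProbeHandler):
+
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+func main() {
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{Name: "liveness-demo"}, // illustrative name
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{{
+                Name:  "demo",
+                Image: "k8s.gcr.io/liveness", // illustrative image serving /healthz
+                LivenessProbe: &v1.Probe{
+                    // 1.15-era field; later client-go versions call this ProbeHandler.
+                    Handler: v1.Handler{
+                        HTTPGet: &v1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
+                    },
+                    InitialDelaySeconds: 5,
+                    PeriodSeconds:       3,
+                    // A single failed probe triggers a restart, which is what
+                    // bumps restartCount in the log above.
+                    FailureThreshold: 1,
+                },
+            }},
+        },
+    }
+    out, _ := json.MarshalIndent(pod, "", "  ")
+    fmt.Println(string(out))
+}
+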
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:34:21.740: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name configmap-test-volume-map-ae9e8752-a797-49e3-9dc4-6f5d1a3b4de3
+STEP: Creating a pod to test consume configMaps
+Jun 20 10:34:21.789: INFO: Waiting up to 5m0s for pod "pod-configmaps-ab374530-1f2f-4889-98cb-a62b2e541611" in namespace "configmap-3941" to be "success or failure"
+Jun 20 10:34:21.793: INFO: Pod "pod-configmaps-ab374530-1f2f-4889-98cb-a62b2e541611": Phase="Pending", Reason="", readiness=false. Elapsed: 3.497638ms
+Jun 20 10:34:23.797: INFO: Pod "pod-configmaps-ab374530-1f2f-4889-98cb-a62b2e541611": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007669089s
+Jun 20 10:34:25.800: INFO: Pod "pod-configmaps-ab374530-1f2f-4889-98cb-a62b2e541611": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01058468s
+STEP: Saw pod success
+Jun 20 10:34:25.800: INFO: Pod "pod-configmaps-ab374530-1f2f-4889-98cb-a62b2e541611" satisfied condition "success or failure"
+Jun 20 10:34:25.802: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-configmaps-ab374530-1f2f-4889-98cb-a62b2e541611 container configmap-volume-test: 
+STEP: delete the pod
+Jun 20 10:34:25.831: INFO: Waiting for pod pod-configmaps-ab374530-1f2f-4889-98cb-a62b2e541611 to disappear
+Jun 20 10:34:25.833: INFO: Pod pod-configmaps-ab374530-1f2f-4889-98cb-a62b2e541611 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:34:25.834: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-3941" for this suite.
+Jun 20 10:34:31.852: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:34:31.919: INFO: namespace configmap-3941 deletion completed in 6.082839337s
+
+• [SLOW TEST:10.179 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] [sig-node] PreStop 
+  should call prestop when killing a pod  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] [sig-node] PreStop
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:34:31.919: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename prestop
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] [sig-node] PreStop
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pre_stop.go:167
+[It] should call prestop when killing a pod  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating server pod server in namespace prestop-9663
+STEP: Waiting for pods to come up.
+STEP: Creating tester pod tester in namespace prestop-9663
+STEP: Deleting pre-stop pod
+Jun 20 10:34:44.991: INFO: Saw: {
+	"Hostname": "server",
+	"Sent": null,
+	"Received": {
+		"prestop": 1
+	},
+	"Errors": null,
+	"Log": [
+		"default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.",
+		"default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.",
+		"default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up."
+	],
+	"StillContactingPeers": true
+}
+STEP: Deleting the server pod
+[AfterEach] [k8s.io] [sig-node] PreStop
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:34:44.997: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "prestop-9663" for this suite.
+Jun 20 10:35:23.011: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:35:23.076: INFO: namespace prestop-9663 deletion completed in 38.074600665s
+
+• [SLOW TEST:51.156 seconds]
+[k8s.io] [sig-node] PreStop
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should call prestop when killing a pod  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
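+
+The PreStop test above works because deleting a pod runs its preStop hook before the container receives SIGTERM; the tester pod uses that window to notify the server pod, which is why the server's JSON state shows "prestop": 1. A hedged Go sketch of a preStop hook follows (the command, URL, and names are illustrative assumptions; the *v1.Handler type matches the 1.15-era API, which newer client-go replaces with *v1.LifecycleHandler):
+
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func main() {
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{Name: "prestop-demo"}, // illustrative name
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{{
+                Name:    "demo",
+                Image:   "busybox",
+                Command: []string{"sh", "-c", "sleep 3600"},
+                Lifecycle: &v1.Lifecycle{
+                    // Runs before the container is signalled on deletion; this is
+                    // where the tester would call out to the server pod.
+                    PreStop: &v1.Handler{
+                        Exec: &v1.ExecAction{Command: []string{"sh", "-c",
+                            "wget -q -O- http://server.prestop-demo.svc/prestop || true"}}, // hypothetical URL
+                    },
+                },
+            }},
+        },
+    }
+    out, _ := json.MarshalIndent(pod, "", "  ")
+    fmt.Println(string(out))
+}
+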
+S
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:35:23.076: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating secret with name projected-secret-test-72ee820b-41b3-43f1-8fea-627cfdcea09c
+STEP: Creating a pod to test consume secrets
+Jun 20 10:35:23.120: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-1bfc5159-7291-4f99-8efe-9acb828f922c" in namespace "projected-5310" to be "success or failure"
+Jun 20 10:35:23.126: INFO: Pod "pod-projected-secrets-1bfc5159-7291-4f99-8efe-9acb828f922c": Phase="Pending", Reason="", readiness=false. Elapsed: 5.623025ms
+Jun 20 10:35:25.129: INFO: Pod "pod-projected-secrets-1bfc5159-7291-4f99-8efe-9acb828f922c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008689985s
+STEP: Saw pod success
+Jun 20 10:35:25.129: INFO: Pod "pod-projected-secrets-1bfc5159-7291-4f99-8efe-9acb828f922c" satisfied condition "success or failure"
+Jun 20 10:35:25.131: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-secrets-1bfc5159-7291-4f99-8efe-9acb828f922c container secret-volume-test: 
+STEP: delete the pod
+Jun 20 10:35:25.151: INFO: Waiting for pod pod-projected-secrets-1bfc5159-7291-4f99-8efe-9acb828f922c to disappear
+Jun 20 10:35:25.153: INFO: Pod pod-projected-secrets-1bfc5159-7291-4f99-8efe-9acb828f922c no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:35:25.153: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-5310" for this suite.
+Jun 20 10:35:31.166: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:35:31.233: INFO: namespace projected-5310 deletion completed in 6.077047207s
+
+• [SLOW TEST:8.157 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:35:31.233: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename daemonsets
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:103
+[It] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 10:35:31.277: INFO: Creating simple daemon set daemon-set
+STEP: Check that daemon pods launch on every node of the cluster.
+Jun 20 10:35:31.286: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:31.286: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:31.286: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:31.288: INFO: Number of nodes with available pods: 0
+Jun 20 10:35:31.288: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:35:32.292: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:32.292: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:32.292: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:32.295: INFO: Number of nodes with available pods: 0
+Jun 20 10:35:32.295: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:35:33.292: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:33.292: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:33.292: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:33.294: INFO: Number of nodes with available pods: 2
+Jun 20 10:35:33.294: INFO: Number of running nodes: 2, number of available pods: 2
+STEP: Update daemon pods image.
+STEP: Check that daemon pods images are updated.
+Jun 20 10:35:33.317: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:33.317: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:33.320: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:33.321: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:33.321: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:34.326: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:34.326: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:34.329: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:34.329: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:34.329: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:35.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:35.324: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:35.331: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:35.331: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:35.331: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:36.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:36.324: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:36.324: INFO: Pod daemon-set-5jspb is not available
+Jun 20 10:35:36.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:36.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:36.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:37.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:37.324: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:37.324: INFO: Pod daemon-set-5jspb is not available
+Jun 20 10:35:37.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:37.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:37.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:38.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:38.324: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:38.324: INFO: Pod daemon-set-5jspb is not available
+Jun 20 10:35:38.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:38.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:38.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:39.328: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:39.328: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:39.328: INFO: Pod daemon-set-5jspb is not available
+Jun 20 10:35:39.331: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:39.331: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:39.331: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:40.325: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:40.325: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:40.325: INFO: Pod daemon-set-5jspb is not available
+Jun 20 10:35:40.328: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:40.328: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:40.328: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:41.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:41.325: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:41.325: INFO: Pod daemon-set-5jspb is not available
+Jun 20 10:35:41.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:41.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:41.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:42.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:42.324: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:42.324: INFO: Pod daemon-set-5jspb is not available
+Jun 20 10:35:42.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:42.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:42.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:43.326: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:43.326: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:43.326: INFO: Pod daemon-set-5jspb is not available
+Jun 20 10:35:43.329: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:43.329: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:43.329: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:44.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:44.324: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:44.324: INFO: Pod daemon-set-5jspb is not available
+Jun 20 10:35:44.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:44.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:44.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:45.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:45.324: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:45.324: INFO: Pod daemon-set-5jspb is not available
+Jun 20 10:35:45.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:45.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:45.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:46.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:46.324: INFO: Wrong image for pod: daemon-set-5jspb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:46.324: INFO: Pod daemon-set-5jspb is not available
+Jun 20 10:35:46.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:46.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:46.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:47.327: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:47.327: INFO: Pod daemon-set-pxl25 is not available
+Jun 20 10:35:47.331: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:47.331: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:47.332: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:48.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:48.324: INFO: Pod daemon-set-pxl25 is not available
+Jun 20 10:35:48.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:48.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:48.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:49.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:49.324: INFO: Pod daemon-set-pxl25 is not available
+Jun 20 10:35:49.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:49.328: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:49.328: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:50.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:50.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:50.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:50.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:51.328: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:51.328: INFO: Pod daemon-set-5cqfb is not available
+Jun 20 10:35:51.331: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:51.331: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:51.331: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:52.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:52.324: INFO: Pod daemon-set-5cqfb is not available
+Jun 20 10:35:52.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:52.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:52.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:53.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:53.324: INFO: Pod daemon-set-5cqfb is not available
+Jun 20 10:35:53.328: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:53.328: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:53.328: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:54.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:54.324: INFO: Pod daemon-set-5cqfb is not available
+Jun 20 10:35:54.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:54.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:54.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:55.329: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:55.329: INFO: Pod daemon-set-5cqfb is not available
+Jun 20 10:35:55.332: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:55.332: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:55.332: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:56.324: INFO: Wrong image for pod: daemon-set-5cqfb. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun 20 10:35:56.324: INFO: Pod daemon-set-5cqfb is not available
+Jun 20 10:35:56.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:56.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:56.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:57.324: INFO: Pod daemon-set-mwsd9 is not available
+Jun 20 10:35:57.327: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:57.327: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:57.327: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+STEP: Check that daemon pods are still running on every node of the cluster.
+Jun 20 10:35:57.331: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:57.331: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:57.331: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:57.333: INFO: Number of nodes with available pods: 1
+Jun 20 10:35:57.333: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:35:58.337: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:58.337: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:58.337: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:58.340: INFO: Number of nodes with available pods: 1
+Jun 20 10:35:58.340: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:35:59.339: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:59.339: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:59.339: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:35:59.342: INFO: Number of nodes with available pods: 1
+Jun 20 10:35:59.342: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:36:00.337: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:36:00.337: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:36:00.337: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:36:00.341: INFO: Number of nodes with available pods: 2
+Jun 20 10:36:00.341: INFO: Number of running nodes: 2, number of available pods: 2
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:69
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-9803, will wait for the garbage collector to delete the pods
+Jun 20 10:36:00.412: INFO: Deleting DaemonSet.extensions daemon-set took: 7.257265ms
+Jun 20 10:36:00.713: INFO: Terminating DaemonSet.extensions daemon-set pods took: 300.188469ms
+Jun 20 10:36:07.216: INFO: Number of nodes with available pods: 0
+Jun 20 10:36:07.216: INFO: Number of running nodes: 0, number of available pods: 0
+Jun 20 10:36:07.218: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-9803/daemonsets","resourceVersion":"16086"},"items":null}
+
+Jun 20 10:36:07.221: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-9803/pods","resourceVersion":"16086"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:36:07.229: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-9803" for this suite.
+Jun 20 10:36:13.248: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:36:13.315: INFO: namespace daemonsets-9803 deletion completed in 6.077673299s
+
+• [SLOW TEST:42.082 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
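+For reference, a minimal sketch of the kind of DaemonSet this test drives (names and labels here are illustrative, not the suite's generated ones): a RollingUpdate DaemonSet whose pod template is patched from the nginx image to the redis test image, and whose pods carry no toleration for the master taint, which is why the three master nodes are skipped throughout the log above.
+
+  apiVersion: apps/v1
+  kind: DaemonSet
+  metadata:
+    name: daemon-set
+  spec:
+    selector:
+      matchLabels:
+        daemonset-name: daemon-set
+    updateStrategy:
+      type: RollingUpdate            # the update strategy under test
+    template:
+      metadata:
+        labels:
+          daemonset-name: daemon-set
+      spec:
+        # no toleration for node-role.kubernetes.io/master:NoSchedule,
+        # so master-tainted nodes never run these pods
+        containers:
+        - name: app
+          image: docker.io/library/nginx:1.14-alpine
+
+The suite patches the template image through the API; roughly the same rollout can be triggered by hand with:
+
+  kubectl set image daemonset/daemon-set app=gcr.io/kubernetes-e2e-test-images/redis:1.0
+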
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Deployment 
+  RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:36:13.316: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:68
+[It] RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 10:36:13.348: INFO: Creating deployment "test-recreate-deployment"
+Jun 20 10:36:13.354: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1
+Jun 20 10:36:13.360: INFO: new replicaset for deployment "test-recreate-deployment" is yet to be created
+Jun 20 10:36:15.365: INFO: Waiting deployment "test-recreate-deployment" to complete
+Jun 20 10:36:15.366: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696623773, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696623773, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696623773, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696623773, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-6df85df6b9\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:36:17.369: INFO: Triggering a new rollout for deployment "test-recreate-deployment"
+Jun 20 10:36:17.376: INFO: Updating deployment test-recreate-deployment
+Jun 20 10:36:17.376: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with old pods
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:62
+Jun 20 10:36:17.454: INFO: Deployment "test-recreate-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment,GenerateName:,Namespace:deployment-9213,SelfLink:/apis/apps/v1/namespaces/deployment-9213/deployments/test-recreate-deployment,UID:4bf09bad-2b9e-4af2-a3b9-bfd7f4b91bc3,ResourceVersion:16181,Generation:2,CreationTimestamp:2019-06-20 10:36:13 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[{Available False 2019-06-20 10:36:17 +0000 UTC 2019-06-20 10:36:17 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} {Progressing True 2019-06-20 10:36:17 +0000 UTC 2019-06-20 10:36:13 +0000 UTC ReplicaSetUpdated ReplicaSet "test-recreate-deployment-5c8c9cc69d" is progressing.}],ReadyReplicas:0,CollisionCount:nil,},}
+
+Jun 20 10:36:17.457: INFO: New ReplicaSet "test-recreate-deployment-5c8c9cc69d" of Deployment "test-recreate-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-5c8c9cc69d,GenerateName:,Namespace:deployment-9213,SelfLink:/apis/apps/v1/namespaces/deployment-9213/replicasets/test-recreate-deployment-5c8c9cc69d,UID:6739520c-9fa4-443e-bfb7-c99b4a8004f4,ResourceVersion:16178,Generation:1,CreationTimestamp:2019-06-20 10:36:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 5c8c9cc69d,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment 4bf09bad-2b9e-4af2-a3b9-bfd7f4b91bc3 0xc001436ee7 0xc001436ee8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 5c8c9cc69d,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 5c8c9cc69d,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun 20 10:36:17.457: INFO: All old ReplicaSets of Deployment "test-recreate-deployment":
+Jun 20 10:36:17.457: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-6df85df6b9,GenerateName:,Namespace:deployment-9213,SelfLink:/apis/apps/v1/namespaces/deployment-9213/replicasets/test-recreate-deployment-6df85df6b9,UID:a886193b-6f1c-43e7-b99a-09121b2a7266,ResourceVersion:16169,Generation:2,CreationTimestamp:2019-06-20 10:36:13 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 6df85df6b9,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment 4bf09bad-2b9e-4af2-a3b9-bfd7f4b91bc3 0xc001437317 0xc001437318}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 6df85df6b9,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 6df85df6b9,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun 20 10:36:17.460: INFO: Pod "test-recreate-deployment-5c8c9cc69d-mh9gh" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-5c8c9cc69d-mh9gh,GenerateName:test-recreate-deployment-5c8c9cc69d-,Namespace:deployment-9213,SelfLink:/api/v1/namespaces/deployment-9213/pods/test-recreate-deployment-5c8c9cc69d-mh9gh,UID:ce3c3f34-c570-4b2f-bcde-88020a557823,ResourceVersion:16180,Generation:0,CreationTimestamp:2019-06-20 10:36:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 5c8c9cc69d,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-recreate-deployment-5c8c9cc69d 6739520c-9fa4-443e-bfb7-c99b4a8004f4 0xc000f10657 0xc000f10658}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-7qzsq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-7qzsq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-7qzsq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc000f106c0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc000f106e0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:36:17 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:36:17 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:36:17 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:36:17 +0000 UTC  }],Message:,Reason:,HostIP:10.100.10.111,PodIP:,StartTime:2019-06-20 10:36:17 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:36:17.460: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-9213" for this suite.
+Jun 20 10:36:23.472: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:36:23.538: INFO: namespace deployment-9213 deletion completed in 6.075241866s
+
+• [SLOW TEST:10.222 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
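+The Deployment under test above, reduced to a sketch (fields taken from the object dump in the log, with only the essentials kept): a single-replica Recreate Deployment created at revision 1 with the redis test image and then rolled to revision 2 with nginx, so the old ReplicaSet must scale to zero before any new pod starts.
+
+  apiVersion: apps/v1
+  kind: Deployment
+  metadata:
+    name: test-recreate-deployment
+  spec:
+    replicas: 1
+    selector:
+      matchLabels:
+        name: sample-pod-3
+    strategy:
+      type: Recreate                 # old pods are deleted before new ones are created
+    template:
+      metadata:
+        labels:
+          name: sample-pod-3
+      spec:
+        containers:
+        - name: redis
+          image: gcr.io/kubernetes-e2e-test-images/redis:1.0   # revision 2 swaps this for nginx:1.14-alpine
+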
+SSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Update Demo 
+  should do a rolling update of a replication controller  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:36:23.538: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[BeforeEach] [k8s.io] Update Demo
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:273
+[It] should do a rolling update of a replication controller  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating the initial replication controller
+Jun 20 10:36:23.570: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-2297'
+Jun 20 10:36:23.781: INFO: stderr: ""
+Jun 20 10:36:23.781: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun 20 10:36:23.781: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-2297'
+Jun 20 10:36:23.860: INFO: stderr: ""
+Jun 20 10:36:23.860: INFO: stdout: "update-demo-nautilus-65pb4 update-demo-nautilus-j9cbw "
+Jun 20 10:36:23.860: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-65pb4 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-2297'
+Jun 20 10:36:23.925: INFO: stderr: ""
+Jun 20 10:36:23.925: INFO: stdout: ""
+Jun 20 10:36:23.925: INFO: update-demo-nautilus-65pb4 is created but not running
+Jun 20 10:36:28.925: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-2297'
+Jun 20 10:36:28.992: INFO: stderr: ""
+Jun 20 10:36:28.992: INFO: stdout: "update-demo-nautilus-65pb4 update-demo-nautilus-j9cbw "
+Jun 20 10:36:28.992: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-65pb4 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-2297'
+Jun 20 10:36:29.056: INFO: stderr: ""
+Jun 20 10:36:29.056: INFO: stdout: "true"
+Jun 20 10:36:29.056: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-65pb4 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-2297'
+Jun 20 10:36:29.117: INFO: stderr: ""
+Jun 20 10:36:29.117: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 20 10:36:29.117: INFO: validating pod update-demo-nautilus-65pb4
+Jun 20 10:36:29.123: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 20 10:36:29.123: INFO: Unmarshalled json jpg/img => {nautilus.jpg}, expecting nautilus.jpg.
+Jun 20 10:36:29.123: INFO: update-demo-nautilus-65pb4 is verified up and running
+Jun 20 10:36:29.123: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-j9cbw -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-2297'
+Jun 20 10:36:29.200: INFO: stderr: ""
+Jun 20 10:36:29.200: INFO: stdout: "true"
+Jun 20 10:36:29.200: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-j9cbw -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-2297'
+Jun 20 10:36:29.272: INFO: stderr: ""
+Jun 20 10:36:29.272: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 20 10:36:29.272: INFO: validating pod update-demo-nautilus-j9cbw
+Jun 20 10:36:29.278: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 20 10:36:29.278: INFO: Unmarshalled json jpg/img => {nautilus.jpg}, expecting nautilus.jpg.
+Jun 20 10:36:29.278: INFO: update-demo-nautilus-j9cbw is verified up and running
+STEP: rolling-update to new replication controller
+Jun 20 10:36:29.288: INFO: scanned /root for discovery docs: 
+Jun 20 10:36:29.288: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 rolling-update update-demo-nautilus --update-period=1s -f - --namespace=kubectl-2297'
+Jun 20 10:36:51.726: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n"
+Jun 20 10:36:51.726: INFO: stdout: "Created update-demo-kitten\nScaling up update-demo-kitten from 0 to 2, scaling down update-demo-nautilus from 2 to 0 (keep 2 pods available, don't exceed 3 pods)\nScaling update-demo-kitten up to 1\nScaling update-demo-nautilus down to 1\nScaling update-demo-kitten up to 2\nScaling update-demo-nautilus down to 0\nUpdate succeeded. Deleting old controller: update-demo-nautilus\nRenaming update-demo-kitten to update-demo-nautilus\nreplicationcontroller/update-demo-nautilus rolling updated\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun 20 10:36:51.726: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-2297'
+Jun 20 10:36:51.795: INFO: stderr: ""
+Jun 20 10:36:51.795: INFO: stdout: "update-demo-kitten-g2gj4 update-demo-kitten-r7hk8 "
+Jun 20 10:36:51.795: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-kitten-g2gj4 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-2297'
+Jun 20 10:36:51.859: INFO: stderr: ""
+Jun 20 10:36:51.859: INFO: stdout: "true"
+Jun 20 10:36:51.859: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-kitten-g2gj4 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-2297'
+Jun 20 10:36:51.922: INFO: stderr: ""
+Jun 20 10:36:51.922: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/kitten:1.0"
+Jun 20 10:36:51.922: INFO: validating pod update-demo-kitten-g2gj4
+Jun 20 10:36:51.928: INFO: got data: {
+  "image": "kitten.jpg"
+}
+
+Jun 20 10:36:51.928: INFO: Unmarshalled json jpg/img => {kitten.jpg}, expecting kitten.jpg.
+Jun 20 10:36:51.928: INFO: update-demo-kitten-g2gj4 is verified up and running
+Jun 20 10:36:51.928: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-kitten-r7hk8 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-2297'
+Jun 20 10:36:51.993: INFO: stderr: ""
+Jun 20 10:36:51.993: INFO: stdout: "true"
+Jun 20 10:36:51.993: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-kitten-r7hk8 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-2297'
+Jun 20 10:36:52.059: INFO: stderr: ""
+Jun 20 10:36:52.059: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/kitten:1.0"
+Jun 20 10:36:52.059: INFO: validating pod update-demo-kitten-r7hk8
+Jun 20 10:36:52.065: INFO: got data: {
+  "image": "kitten.jpg"
+}
+
+Jun 20 10:36:52.065: INFO: Unmarshalled json jpg/img => {kitten.jpg}, expecting kitten.jpg.
+Jun 20 10:36:52.065: INFO: update-demo-kitten-r7hk8 is verified up and running
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:36:52.065: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-2297" for this suite.
+Jun 20 10:37:14.087: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:37:14.166: INFO: namespace kubectl-2297 deletion completed in 22.091234979s
+
+• [SLOW TEST:50.628 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Update Demo
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should do a rolling update of a replication controller  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
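+The commands this test shells out to are recorded verbatim above; a hand-run equivalent looks roughly like the following (the manifest file names are placeholders, since the suite pipes its manifests over stdin via "-f -"):
+
+  # create the initial nautilus replication controller
+  kubectl --kubeconfig "$KUBECONFIG" -n kubectl-2297 create -f update-demo-nautilus.yaml
+  # roll it over to the kitten image; rolling-update is deprecated in favour of
+  # Deployments and "kubectl rollout", as the stderr captured in the log notes
+  kubectl --kubeconfig "$KUBECONFIG" -n kubectl-2297 rolling-update update-demo-nautilus \
+      --update-period=1s -f update-demo-kitten.yaml
+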
+SSSSSSSSSSSS
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:37:14.166: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Performing setup for networking test in namespace pod-network-test-27
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun 20 10:37:14.200: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun 20 10:37:40.279: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.38.0.2 8081 | grep -v '^\s*$'] Namespace:pod-network-test-27 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:37:40.279: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:37:41.373: INFO: Found all expected endpoints: [netserver-0]
+Jun 20 10:37:41.376: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.34.0.2 8081 | grep -v '^\s*$'] Namespace:pod-network-test-27 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:37:41.376: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:37:42.520: INFO: Found all expected endpoints: [netserver-1]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:37:42.521: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-27" for this suite.
+Jun 20 10:38:04.534: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:38:04.600: INFO: namespace pod-network-test-27 deletion completed in 22.075813944s
+
+• [SLOW TEST:50.434 seconds]
+[sig-network] Networking
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
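+The probe itself is visible in the ExecWithOptions lines above; an equivalent manual check from the hostexec helper pod (pod IP, port, and namespace copied from this run, so they only apply to it) would be:
+
+  kubectl -n pod-network-test-27 exec host-test-container-pod -c hostexec -- \
+      /bin/sh -c "echo hostName | nc -w 1 -u 10.38.0.2 8081 | grep -v '^\s*$'"
+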
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a read only busybox container 
+  should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:38:04.600: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:38:06.663: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-7600" for this suite.
+Jun 20 10:38:44.675: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:38:44.745: INFO: namespace kubelet-test-7600 deletion completed in 38.079864271s
+
+• [SLOW TEST:40.145 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  when scheduling a read only busybox container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:187
+    should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
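+This test emits no STEP lines between setup and teardown, so for orientation, a minimal sketch of the shape of pod it schedules (image, command, and name are assumptions; the authoritative spec lives in test/e2e/common/kubelet.go): a busybox container with a read-only root filesystem whose attempted write must fail.
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: busybox-readonly-fs        # illustrative name
+  spec:
+    restartPolicy: Never
+    containers:
+    - name: busybox
+      image: docker.io/library/busybox
+      command: ["/bin/sh", "-c", "echo test > /file; sleep 240"]   # the write is expected to fail
+      securityContext:
+        readOnlyRootFilesystem: true
+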
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:38:44.746: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:38:44.786: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c750676a-9cd6-472b-b5c2-e1010ba39d12" in namespace "projected-7276" to be "success or failure"
+Jun 20 10:38:44.790: INFO: Pod "downwardapi-volume-c750676a-9cd6-472b-b5c2-e1010ba39d12": Phase="Pending", Reason="", readiness=false. Elapsed: 3.877084ms
+Jun 20 10:38:46.793: INFO: Pod "downwardapi-volume-c750676a-9cd6-472b-b5c2-e1010ba39d12": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007280546s
+Jun 20 10:38:48.796: INFO: Pod "downwardapi-volume-c750676a-9cd6-472b-b5c2-e1010ba39d12": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01056258s
+STEP: Saw pod success
+Jun 20 10:38:48.796: INFO: Pod "downwardapi-volume-c750676a-9cd6-472b-b5c2-e1010ba39d12" satisfied condition "success or failure"
+Jun 20 10:38:48.799: INFO: Trying to get logs from node ip-10-100-12-226.eu-west-1.compute.internal pod downwardapi-volume-c750676a-9cd6-472b-b5c2-e1010ba39d12 container client-container: 
+STEP: delete the pod
+Jun 20 10:38:48.816: INFO: Waiting for pod downwardapi-volume-c750676a-9cd6-472b-b5c2-e1010ba39d12 to disappear
+Jun 20 10:38:48.818: INFO: Pod downwardapi-volume-c750676a-9cd6-472b-b5c2-e1010ba39d12 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:38:48.819: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-7276" for this suite.
+Jun 20 10:38:54.834: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:38:55.205: INFO: namespace projected-7276 deletion completed in 6.381953474s
+
+• [SLOW TEST:10.459 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
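+A minimal sketch of the projected downward API volume this test exercises (pod name, image, and the limit value are illustrative; the test asserts that the file content matches the container's CPU limit): the limit is projected into a file via resourceFieldRef, and the container simply prints it.
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: downwardapi-volume-example
+  spec:
+    containers:
+    - name: client-container
+      image: docker.io/library/busybox
+      command: ["sh", "-c", "cat /etc/podinfo/cpu_limit"]
+      resources:
+        limits:
+          cpu: "500m"                # illustrative limit
+      volumeMounts:
+      - name: podinfo
+        mountPath: /etc/podinfo
+    volumes:
+    - name: podinfo
+      projected:
+        sources:
+        - downwardAPI:
+            items:
+            - path: cpu_limit
+              resourceFieldRef:
+                containerName: client-container
+                resource: limits.cpu
+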
+SSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should rollback without unnecessary restarts [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:38:55.205: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename daemonsets
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:103
+[It] should rollback without unnecessary restarts [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 10:38:55.251: INFO: Create a RollingUpdate DaemonSet
+Jun 20 10:38:55.256: INFO: Check that daemon pods launch on every node of the cluster
+Jun 20 10:38:55.259: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:38:55.259: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:38:55.259: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:38:55.261: INFO: Number of nodes with available pods: 0
+Jun 20 10:38:55.261: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:38:56.265: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:38:56.265: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:38:56.265: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:38:56.267: INFO: Number of nodes with available pods: 0
+Jun 20 10:38:56.267: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 10:38:57.265: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:38:57.265: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:38:57.265: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:38:57.268: INFO: Number of nodes with available pods: 2
+Jun 20 10:38:57.268: INFO: Number of running nodes: 2, number of available pods: 2
+Jun 20 10:38:57.268: INFO: Update the DaemonSet to trigger a rollout
+Jun 20 10:38:57.274: INFO: Updating DaemonSet daemon-set
+Jun 20 10:39:02.291: INFO: Roll back the DaemonSet before rollout is complete
+Jun 20 10:39:02.299: INFO: Updating DaemonSet daemon-set
+Jun 20 10:39:02.299: INFO: Make sure DaemonSet rollback is complete
+Jun 20 10:39:02.304: INFO: Wrong image for pod: daemon-set-hpkrc. Expected: docker.io/library/nginx:1.14-alpine, got: foo:non-existent.
+Jun 20 10:39:02.304: INFO: Pod daemon-set-hpkrc is not available
+Jun 20 10:39:02.309: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:02.309: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:02.309: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:03.321: INFO: Wrong image for pod: daemon-set-hpkrc. Expected: docker.io/library/nginx:1.14-alpine, got: foo:non-existent.
+Jun 20 10:39:03.321: INFO: Pod daemon-set-hpkrc is not available
+Jun 20 10:39:03.325: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:03.325: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:03.325: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:04.313: INFO: Wrong image for pod: daemon-set-hpkrc. Expected: docker.io/library/nginx:1.14-alpine, got: foo:non-existent.
+Jun 20 10:39:04.313: INFO: Pod daemon-set-hpkrc is not available
+Jun 20 10:39:04.316: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:04.316: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:04.316: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:05.313: INFO: Wrong image for pod: daemon-set-hpkrc. Expected: docker.io/library/nginx:1.14-alpine, got: foo:non-existent.
+Jun 20 10:39:05.313: INFO: Pod daemon-set-hpkrc is not available
+Jun 20 10:39:05.316: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:05.316: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:05.316: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:06.314: INFO: Wrong image for pod: daemon-set-hpkrc. Expected: docker.io/library/nginx:1.14-alpine, got: foo:non-existent.
+Jun 20 10:39:06.314: INFO: Pod daemon-set-hpkrc is not available
+Jun 20 10:39:06.335: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:06.335: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:06.335: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:07.313: INFO: Pod daemon-set-zwwg5 is not available
+Jun 20 10:39:07.316: INFO: DaemonSet pods can't tolerate node ip-10-100-10-235.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:07.316: INFO: DaemonSet pods can't tolerate node ip-10-100-11-16.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun 20 10:39:07.316: INFO: DaemonSet pods can't tolerate node ip-10-100-12-41.eu-west-1.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:69
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-3505, will wait for the garbage collector to delete the pods
+Jun 20 10:39:07.380: INFO: Deleting DaemonSet.extensions daemon-set took: 6.85403ms
+Jun 20 10:39:07.680: INFO: Terminating DaemonSet.extensions daemon-set pods took: 300.231825ms
+Jun 20 10:39:09.483: INFO: Number of nodes with available pods: 0
+Jun 20 10:39:09.483: INFO: Number of running nodes: 0, number of available pods: 0
+Jun 20 10:39:09.486: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-3505/daemonsets","resourceVersion":"16857"},"items":null}
+
+Jun 20 10:39:09.488: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-3505/pods","resourceVersion":"16857"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:39:09.494: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-3505" for this suite.
+Jun 20 10:39:15.507: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:39:15.621: INFO: namespace daemonsets-3505 deletion completed in 6.124391266s
+
+• [SLOW TEST:20.416 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should rollback without unnecessary restarts [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
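+
+The rollback exercised above can be driven by hand with kubectl's rollout machinery. A minimal sketch; the container name "app" is an assumption, since this log never prints the DaemonSet's container name:
+
+# DaemonSet and namespace names as in the log; "app" is a guessed container name
+kubectl -n daemonsets-3505 set image daemonset/daemon-set app=foo:non-existent
+kubectl -n daemonsets-3505 rollout undo daemonset/daemon-set
+kubectl -n daemonsets-3505 rollout status daemonset/daemon-set
+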
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:39:15.622: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward api env vars
+Jun 20 10:39:15.666: INFO: Waiting up to 5m0s for pod "downward-api-8deef532-4eaf-4c6d-9a8b-94c387143033" in namespace "downward-api-5027" to be "success or failure"
+Jun 20 10:39:15.674: INFO: Pod "downward-api-8deef532-4eaf-4c6d-9a8b-94c387143033": Phase="Pending", Reason="", readiness=false. Elapsed: 7.094519ms
+Jun 20 10:39:17.677: INFO: Pod "downward-api-8deef532-4eaf-4c6d-9a8b-94c387143033": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.010361325s
+STEP: Saw pod success
+Jun 20 10:39:17.677: INFO: Pod "downward-api-8deef532-4eaf-4c6d-9a8b-94c387143033" satisfied condition "success or failure"
+Jun 20 10:39:17.680: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downward-api-8deef532-4eaf-4c6d-9a8b-94c387143033 container dapi-container: 
+STEP: delete the pod
+Jun 20 10:39:17.697: INFO: Waiting for pod downward-api-8deef532-4eaf-4c6d-9a8b-94c387143033 to disappear
+Jun 20 10:39:17.699: INFO: Pod downward-api-8deef532-4eaf-4c6d-9a8b-94c387143033 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:39:17.699: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-5027" for this suite.
+Jun 20 10:39:23.714: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:39:23.781: INFO: namespace downward-api-5027 deletion completed in 6.077857434s
+
+• [SLOW TEST:8.159 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:32
+  should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
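+
+What the test above verifies is that resourceFieldRef env vars fall back to the node's allocatable values when a container declares no limits. A minimal sketch of the same probe; the pod name is illustrative, not from this run:
+
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downward-defaults-demo   # illustrative name
+spec:
+  restartPolicy: Never
+  containers:
+  - name: dapi-container
+    image: busybox
+    command: ["sh", "-c", "env | grep LIMIT"]
+    # no resources.limits set, so these resolve to node allocatable
+    env:
+    - name: CPU_LIMIT
+      valueFrom:
+        resourceFieldRef:
+          containerName: dapi-container
+          resource: limits.cpu
+    - name: MEMORY_LIMIT
+      valueFrom:
+        resourceFieldRef:
+          containerName: dapi-container
+          resource: limits.memory
+EOF
+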
+SSSSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:39:23.781: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename statefulset
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:60
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:75
+STEP: Creating service test in namespace statefulset-4599
+[It] should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a new StatefulSet
+Jun 20 10:39:23.837: INFO: Found 0 stateful pods, waiting for 3
+Jun 20 10:39:33.841: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true
+Jun 20 10:39:33.841: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true
+Jun 20 10:39:33.841: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true
+Jun 20 10:39:33.852: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-4599 ss2-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 20 10:39:34.205: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun 20 10:39:34.205: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 20 10:39:34.205: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+STEP: Updating StatefulSet template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine
+Jun 20 10:39:44.235: INFO: Updating stateful set ss2
+STEP: Creating a new revision
+STEP: Updating Pods in reverse ordinal order
+Jun 20 10:39:54.257: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-4599 ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun 20 10:39:54.410: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun 20 10:39:54.410: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun 20 10:39:54.410: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun 20 10:40:04.432: INFO: Waiting for StatefulSet statefulset-4599/ss2 to complete update
+Jun 20 10:40:04.432: INFO: Waiting for Pod statefulset-4599/ss2-0 to have revision ss2-6c5cd755cd update revision ss2-7c9b54fd4c
+Jun 20 10:40:04.432: INFO: Waiting for Pod statefulset-4599/ss2-1 to have revision ss2-6c5cd755cd update revision ss2-7c9b54fd4c
+Jun 20 10:40:04.432: INFO: Waiting for Pod statefulset-4599/ss2-2 to have revision ss2-6c5cd755cd update revision ss2-7c9b54fd4c
+Jun 20 10:40:14.439: INFO: Waiting for StatefulSet statefulset-4599/ss2 to complete update
+Jun 20 10:40:14.439: INFO: Waiting for Pod statefulset-4599/ss2-0 to have revision ss2-6c5cd755cd update revision ss2-7c9b54fd4c
+Jun 20 10:40:24.438: INFO: Waiting for StatefulSet statefulset-4599/ss2 to complete update
+Jun 20 10:40:24.438: INFO: Waiting for Pod statefulset-4599/ss2-0 to have revision ss2-6c5cd755cd update revision ss2-7c9b54fd4c
+STEP: Rolling back to a previous revision
+Jun 20 10:40:34.439: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-4599 ss2-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 20 10:40:34.595: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun 20 10:40:34.595: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 20 10:40:34.595: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun 20 10:40:44.624: INFO: Updating stateful set ss2
+STEP: Rolling back update in reverse ordinal order
+Jun 20 10:40:54.640: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-4599 ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun 20 10:40:54.787: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun 20 10:40:54.787: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun 20 10:40:54.787: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun 20 10:41:04.805: INFO: Waiting for StatefulSet statefulset-4599/ss2 to complete update
+Jun 20 10:41:04.805: INFO: Waiting for Pod statefulset-4599/ss2-0 to have revision ss2-7c9b54fd4c update revision ss2-6c5cd755cd
+Jun 20 10:41:04.805: INFO: Waiting for Pod statefulset-4599/ss2-1 to have revision ss2-7c9b54fd4c update revision ss2-6c5cd755cd
+Jun 20 10:41:04.805: INFO: Waiting for Pod statefulset-4599/ss2-2 to have revision ss2-7c9b54fd4c update revision ss2-6c5cd755cd
+Jun 20 10:41:14.811: INFO: Waiting for StatefulSet statefulset-4599/ss2 to complete update
+Jun 20 10:41:14.811: INFO: Waiting for Pod statefulset-4599/ss2-0 to have revision ss2-7c9b54fd4c update revision ss2-6c5cd755cd
+Jun 20 10:41:14.811: INFO: Waiting for Pod statefulset-4599/ss2-1 to have revision ss2-7c9b54fd4c update revision ss2-6c5cd755cd
+Jun 20 10:41:24.810: INFO: Waiting for StatefulSet statefulset-4599/ss2 to complete update
+Jun 20 10:41:24.810: INFO: Waiting for Pod statefulset-4599/ss2-0 to have revision ss2-7c9b54fd4c update revision ss2-6c5cd755cd
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:86
+Jun 20 10:41:34.824: INFO: Deleting all statefulset in ns statefulset-4599
+Jun 20 10:41:34.826: INFO: Scaling statefulset ss2 to 0
+Jun 20 10:41:54.841: INFO: Waiting for statefulset status.replicas updated to 0
+Jun 20 10:41:54.843: INFO: Deleting statefulset ss2
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:41:54.854: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-4599" for this suite.
+Jun 20 10:42:00.869: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:42:00.942: INFO: namespace statefulset-4599 deletion completed in 6.082820313s
+
+• [SLOW TEST:157.161 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should perform rolling updates and roll backs of template modifications [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
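+
+The update and rollback above follow the StatefulSet RollingUpdate strategy; the mv of index.html toggles the pods' readiness check, which is why the update pauses between ordinals. A hand-driven equivalent, assuming the container is named "nginx" (the log does not say):
+
+kubectl -n statefulset-4599 set image statefulset/ss2 nginx=docker.io/library/nginx:1.15-alpine  # container name assumed
+kubectl -n statefulset-4599 rollout status statefulset/ss2
+kubectl -n statefulset-4599 rollout undo statefulset/ss2
+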
+SSS
+------------------------------
+[sig-network] Services 
+  should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:42:00.942: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename services
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:88
+[It] should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating service endpoint-test2 in namespace services-5997
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-5997 to expose endpoints map[]
+Jun 20 10:42:00.988: INFO: Get endpoints failed (4.192001ms elapsed, ignoring for 5s): endpoints "endpoint-test2" not found
+Jun 20 10:42:01.991: INFO: successfully validated that service endpoint-test2 in namespace services-5997 exposes endpoints map[] (1.007556311s elapsed)
+STEP: Creating pod pod1 in namespace services-5997
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-5997 to expose endpoints map[pod1:[80]]
+Jun 20 10:42:04.030: INFO: successfully validated that service endpoint-test2 in namespace services-5997 exposes endpoints map[pod1:[80]] (2.030938875s elapsed)
+STEP: Creating pod pod2 in namespace services-5997
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-5997 to expose endpoints map[pod1:[80] pod2:[80]]
+Jun 20 10:42:06.064: INFO: successfully validated that service endpoint-test2 in namespace services-5997 exposes endpoints map[pod1:[80] pod2:[80]] (2.026791522s elapsed)
+STEP: Deleting pod pod1 in namespace services-5997
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-5997 to expose endpoints map[pod2:[80]]
+Jun 20 10:42:06.090: INFO: successfully validated that service endpoint-test2 in namespace services-5997 exposes endpoints map[pod2:[80]] (17.141066ms elapsed)
+STEP: Deleting pod pod2 in namespace services-5997
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-5997 to expose endpoints map[]
+Jun 20 10:42:07.104: INFO: successfully validated that service endpoint-test2 in namespace services-5997 exposes endpoints map[] (1.00720235s elapsed)
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:42:07.120: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "services-5997" for this suite.
+Jun 20 10:42:13.133: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:42:13.203: INFO: namespace services-5997 deletion completed in 6.078989573s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:92
+
+• [SLOW TEST:12.261 seconds]
+[sig-network] Services
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
+  should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
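+
+The endpoint bookkeeping validated above can be watched by hand; a sketch with illustrative names (kubectl create service selects on app=<name>):
+
+kubectl create service clusterip endpoint-demo --tcp=80:80                     # illustrative name
+kubectl run pod1 --image=nginx --restart=Never --labels=app=endpoint-demo --port=80
+kubectl get endpoints endpoint-demo --watch    # pod IPs appear and disappear as pods come and go
+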
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:42:13.203: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir 0777 on tmpfs
+Jun 20 10:42:13.252: INFO: Waiting up to 5m0s for pod "pod-7253368c-dcc3-4359-91b9-92400d61c2de" in namespace "emptydir-8907" to be "success or failure"
+Jun 20 10:42:13.256: INFO: Pod "pod-7253368c-dcc3-4359-91b9-92400d61c2de": Phase="Pending", Reason="", readiness=false. Elapsed: 3.805569ms
+Jun 20 10:42:15.259: INFO: Pod "pod-7253368c-dcc3-4359-91b9-92400d61c2de": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007196044s
+STEP: Saw pod success
+Jun 20 10:42:15.259: INFO: Pod "pod-7253368c-dcc3-4359-91b9-92400d61c2de" satisfied condition "success or failure"
+Jun 20 10:42:15.262: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-7253368c-dcc3-4359-91b9-92400d61c2de container test-container: 
+STEP: delete the pod
+Jun 20 10:42:15.278: INFO: Waiting for pod pod-7253368c-dcc3-4359-91b9-92400d61c2de to disappear
+Jun 20 10:42:15.280: INFO: Pod pod-7253368c-dcc3-4359-91b9-92400d61c2de no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:42:15.280: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-8907" for this suite.
+Jun 20 10:42:21.294: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:42:21.369: INFO: namespace emptydir-8907 deletion completed in 6.085469131s
+
+• [SLOW TEST:8.166 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
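+
+A loose busybox equivalent of the tmpfs check above (this is not the image the suite uses, and the 0644 variant later in this log differs only in the chmod mode):
+
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: emptydir-tmpfs-demo     # illustrative name
+spec:
+  restartPolicy: Never
+  securityContext:
+    runAsUser: 1000             # non-root, as in the test name
+  containers:
+  - name: test-container
+    image: busybox
+    command: ["sh", "-c", "mount | grep /data; touch /data/f && chmod 0777 /data/f && stat -c '%a' /data/f"]
+    volumeMounts:
+    - name: data
+      mountPath: /data
+  volumes:
+  - name: data
+    emptyDir:
+      medium: Memory            # backs the volume with tmpfs
+EOF
+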
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Services 
+  should provide secure master service  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:42:21.369: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename services
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:88
+[It] should provide secure master service  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:42:21.402: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "services-9493" for this suite.
+Jun 20 10:42:27.414: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:42:27.512: INFO: namespace services-9493 deletion completed in 6.10756042s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:92
+
+• [SLOW TEST:6.143 seconds]
+[sig-network] Services
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
+  should provide secure master service  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
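+
+The check above asserts that the built-in `kubernetes` service in the default namespace fronts the API server over HTTPS. A quick manual equivalent; on a conformant cluster this prints https:443:
+
+kubectl get service kubernetes -n default -o jsonpath='{.spec.ports[0].name}:{.spec.ports[0].port}{"\n"}'
+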
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:42:27.512: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir 0644 on tmpfs
+Jun 20 10:42:27.566: INFO: Waiting up to 5m0s for pod "pod-69417ce9-702b-4b1b-ac34-37a7e5df277b" in namespace "emptydir-3597" to be "success or failure"
+Jun 20 10:42:27.569: INFO: Pod "pod-69417ce9-702b-4b1b-ac34-37a7e5df277b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.93066ms
+Jun 20 10:42:29.573: INFO: Pod "pod-69417ce9-702b-4b1b-ac34-37a7e5df277b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006878242s
+STEP: Saw pod success
+Jun 20 10:42:29.573: INFO: Pod "pod-69417ce9-702b-4b1b-ac34-37a7e5df277b" satisfied condition "success or failure"
+Jun 20 10:42:29.576: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-69417ce9-702b-4b1b-ac34-37a7e5df277b container test-container: 
+STEP: delete the pod
+Jun 20 10:42:29.593: INFO: Waiting for pod pod-69417ce9-702b-4b1b-ac34-37a7e5df277b to disappear
+Jun 20 10:42:29.596: INFO: Pod pod-69417ce9-702b-4b1b-ac34-37a7e5df277b no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:42:29.596: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-3597" for this suite.
+Jun 20 10:42:35.608: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:42:35.674: INFO: namespace emptydir-3597 deletion completed in 6.075400823s
+
+• [SLOW TEST:8.162 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:42:35.674: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating the pod
+Jun 20 10:42:38.238: INFO: Successfully updated pod "labelsupdate638fe11b-bda2-4b18-9842-97f5258e4d88"
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:42:40.264: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-6978" for this suite.
+Jun 20 10:43:02.277: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:43:02.349: INFO: namespace projected-6978 deletion completed in 22.081778639s
+
+• [SLOW TEST:26.675 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
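+
+The label update above is reflected into a projected downwardAPI file, which the kubelet rewrites after the pod's metadata changes. A minimal sketch; names are illustrative:
+
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: labelsupdate-demo       # illustrative name
+  labels:
+    tier: demo
+spec:
+  containers:
+  - name: client
+    image: busybox
+    command: ["sh", "-c", "while true; do cat /etc/podinfo/labels; echo; sleep 5; done"]
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    projected:
+      sources:
+      - downwardAPI:
+          items:
+          - path: labels
+            fieldRef:
+              fieldPath: metadata.labels
+EOF
+kubectl label pod labelsupdate-demo tier=updated --overwrite   # the mounted file follows shortly
+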
+[k8s.io] Container Runtime blackbox test when starting a container that exits 
+  should run with the expected status [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:43:02.349: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-runtime
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should run with the expected status [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount'
+STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase'
+STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' condition
+STEP: Container 'terminate-cmd-rpa': should get the expected 'State'
+STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance]
+STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount'
+STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase'
+STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition
+STEP: Container 'terminate-cmd-rpof': should get the expected 'State'
+STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance]
+STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount'
+STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase'
+STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition
+STEP: Container 'terminate-cmd-rpn': should get the expected 'State'
+STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance]
+[AfterEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:43:24.580: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-runtime-2956" for this suite.
+Jun 20 10:43:30.595: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:43:30.671: INFO: namespace container-runtime-2956 deletion completed in 6.088020111s
+
+• [SLOW TEST:28.322 seconds]
+[k8s.io] Container Runtime
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  blackbox test
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:38
+    when starting a container that exits
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:39
+      should run with the expected status [NodeConformance] [Conformance]
+      /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
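+
+The terminate-cmd-rpa/rpof/rpn containers above presumably cover the three restartPolicy variants; the status fields the test asserts on can be read back directly. A minimal Never-policy sketch with illustrative names; once the container exits, the last command prints "Failed 0":
+
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: terminate-demo          # illustrative name
+spec:
+  restartPolicy: Never
+  containers:
+  - name: c
+    image: busybox
+    command: ["sh", "-c", "exit 1"]
+EOF
+kubectl get pod terminate-demo -o jsonpath='{.status.phase} {.status.containerStatuses[0].restartCount}{"\n"}'
+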
+SSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Update Demo 
+  should scale a replication controller  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:43:30.671: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[BeforeEach] [k8s.io] Update Demo
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:273
+[It] should scale a replication controller  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating a replication controller
+Jun 20 10:43:30.708: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-3072'
+Jun 20 10:43:31.144: INFO: stderr: ""
+Jun 20 10:43:31.144: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun 20 10:43:31.144: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3072'
+Jun 20 10:43:31.227: INFO: stderr: ""
+Jun 20 10:43:31.227: INFO: stdout: "update-demo-nautilus-66fws update-demo-nautilus-bdtwp "
+Jun 20 10:43:31.227: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-66fws -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:31.287: INFO: stderr: ""
+Jun 20 10:43:31.287: INFO: stdout: ""
+Jun 20 10:43:31.287: INFO: update-demo-nautilus-66fws is created but not running
+Jun 20 10:43:36.287: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3072'
+Jun 20 10:43:36.361: INFO: stderr: ""
+Jun 20 10:43:36.361: INFO: stdout: "update-demo-nautilus-66fws update-demo-nautilus-bdtwp "
+Jun 20 10:43:36.361: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-66fws -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:36.433: INFO: stderr: ""
+Jun 20 10:43:36.433: INFO: stdout: "true"
+Jun 20 10:43:36.433: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-66fws -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:36.495: INFO: stderr: ""
+Jun 20 10:43:36.495: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 20 10:43:36.495: INFO: validating pod update-demo-nautilus-66fws
+Jun 20 10:43:36.508: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 20 10:43:36.508: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 20 10:43:36.508: INFO: update-demo-nautilus-66fws is verified up and running
+Jun 20 10:43:36.508: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-bdtwp -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:36.576: INFO: stderr: ""
+Jun 20 10:43:36.576: INFO: stdout: "true"
+Jun 20 10:43:36.576: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-bdtwp -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:36.643: INFO: stderr: ""
+Jun 20 10:43:36.643: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 20 10:43:36.643: INFO: validating pod update-demo-nautilus-bdtwp
+Jun 20 10:43:36.650: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 20 10:43:36.650: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 20 10:43:36.651: INFO: update-demo-nautilus-bdtwp is verified up and running
+STEP: scaling down the replication controller
+Jun 20 10:43:36.652: INFO: scanned /root for discovery docs: 
+Jun 20 10:43:36.652: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 scale rc update-demo-nautilus --replicas=1 --timeout=5m --namespace=kubectl-3072'
+Jun 20 10:43:37.739: INFO: stderr: ""
+Jun 20 10:43:37.739: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun 20 10:43:37.740: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3072'
+Jun 20 10:43:37.816: INFO: stderr: ""
+Jun 20 10:43:37.816: INFO: stdout: "update-demo-nautilus-66fws update-demo-nautilus-bdtwp "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+Jun 20 10:43:42.817: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3072'
+Jun 20 10:43:42.882: INFO: stderr: ""
+Jun 20 10:43:42.882: INFO: stdout: "update-demo-nautilus-66fws update-demo-nautilus-bdtwp "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+Jun 20 10:43:47.882: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3072'
+Jun 20 10:43:47.949: INFO: stderr: ""
+Jun 20 10:43:47.949: INFO: stdout: "update-demo-nautilus-bdtwp "
+Jun 20 10:43:47.949: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-bdtwp -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:48.016: INFO: stderr: ""
+Jun 20 10:43:48.016: INFO: stdout: "true"
+Jun 20 10:43:48.016: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-bdtwp -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:48.083: INFO: stderr: ""
+Jun 20 10:43:48.083: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 20 10:43:48.083: INFO: validating pod update-demo-nautilus-bdtwp
+Jun 20 10:43:48.088: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 20 10:43:48.088: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 20 10:43:48.088: INFO: update-demo-nautilus-bdtwp is verified up and running
+STEP: scaling up the replication controller
+Jun 20 10:43:48.089: INFO: scanned /root for discovery docs: 
+Jun 20 10:43:48.089: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 scale rc update-demo-nautilus --replicas=2 --timeout=5m --namespace=kubectl-3072'
+Jun 20 10:43:49.177: INFO: stderr: ""
+Jun 20 10:43:49.177: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun 20 10:43:49.177: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3072'
+Jun 20 10:43:49.259: INFO: stderr: ""
+Jun 20 10:43:49.259: INFO: stdout: "update-demo-nautilus-22h29 update-demo-nautilus-bdtwp "
+Jun 20 10:43:49.259: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-22h29 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:49.360: INFO: stderr: ""
+Jun 20 10:43:49.360: INFO: stdout: ""
+Jun 20 10:43:49.360: INFO: update-demo-nautilus-22h29 is created but not running
+Jun 20 10:43:54.361: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3072'
+Jun 20 10:43:54.426: INFO: stderr: ""
+Jun 20 10:43:54.426: INFO: stdout: "update-demo-nautilus-22h29 update-demo-nautilus-bdtwp "
+Jun 20 10:43:54.426: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-22h29 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:54.491: INFO: stderr: ""
+Jun 20 10:43:54.491: INFO: stdout: "true"
+Jun 20 10:43:54.491: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-22h29 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:54.565: INFO: stderr: ""
+Jun 20 10:43:54.565: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 20 10:43:54.565: INFO: validating pod update-demo-nautilus-22h29
+Jun 20 10:43:54.571: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 20 10:43:54.571: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 20 10:43:54.571: INFO: update-demo-nautilus-22h29 is verified up and running
+Jun 20 10:43:54.571: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-bdtwp -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:54.635: INFO: stderr: ""
+Jun 20 10:43:54.635: INFO: stdout: "true"
+Jun 20 10:43:54.635: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-bdtwp -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-3072'
+Jun 20 10:43:54.702: INFO: stderr: ""
+Jun 20 10:43:54.702: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 20 10:43:54.702: INFO: validating pod update-demo-nautilus-bdtwp
+Jun 20 10:43:54.706: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 20 10:43:54.706: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 20 10:43:54.707: INFO: update-demo-nautilus-bdtwp is verified up and running
+STEP: using delete to clean up resources
+Jun 20 10:43:54.707: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete --grace-period=0 --force -f - --namespace=kubectl-3072'
+Jun 20 10:43:54.776: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 20 10:43:54.776: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n"
+Jun 20 10:43:54.776: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-3072'
+Jun 20 10:43:54.844: INFO: stderr: "No resources found.\n"
+Jun 20 10:43:54.844: INFO: stdout: ""
+Jun 20 10:43:54.844: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -l name=update-demo --namespace=kubectl-3072 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun 20 10:43:54.911: INFO: stderr: ""
+Jun 20 10:43:54.911: INFO: stdout: "update-demo-nautilus-22h29\nupdate-demo-nautilus-bdtwp\n"
+Jun 20 10:43:55.411: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-3072'
+Jun 20 10:43:55.484: INFO: stderr: "No resources found.\n"
+Jun 20 10:43:55.484: INFO: stdout: ""
+Jun 20 10:43:55.484: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -l name=update-demo --namespace=kubectl-3072 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun 20 10:43:55.548: INFO: stderr: ""
+Jun 20 10:43:55.548: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:43:55.548: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-3072" for this suite.
+Jun 20 10:44:17.562: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:44:17.629: INFO: namespace kubectl-3072 deletion completed in 22.077063241s
+
+• [SLOW TEST:46.957 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Update Demo
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should scale a replication controller  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
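+
+The scale-and-verify loop above reduces to two commands; readiness can be polled from the controller status instead of listing pods:
+
+kubectl -n kubectl-3072 scale rc/update-demo-nautilus --replicas=2 --timeout=5m
+kubectl -n kubectl-3072 get rc update-demo-nautilus -o jsonpath='{.status.readyReplicas}{"\n"}'
+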
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Watchers 
+  should observe add, update, and delete watch notifications on configmaps [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:44:17.629: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename watch
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should observe add, update, and delete watch notifications on configmaps [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating a watch on configmaps with label A
+STEP: creating a watch on configmaps with label B
+STEP: creating a watch on configmaps with label A or B
+STEP: creating a configmap with label A and ensuring the correct watchers observe the notification
+Jun 20 10:44:17.671: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-a,UID:6ea72f8a-a9dc-4397-966c-84c63905f6c5,ResourceVersion:18108,Generation:0,CreationTimestamp:2019-06-20 10:44:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun 20 10:44:17.671: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-a,UID:6ea72f8a-a9dc-4397-966c-84c63905f6c5,ResourceVersion:18108,Generation:0,CreationTimestamp:2019-06-20 10:44:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+STEP: modifying configmap A and ensuring the correct watchers observe the notification
+Jun 20 10:44:27.678: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-a,UID:6ea72f8a-a9dc-4397-966c-84c63905f6c5,ResourceVersion:18127,Generation:0,CreationTimestamp:2019-06-20 10:44:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+Jun 20 10:44:27.678: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-a,UID:6ea72f8a-a9dc-4397-966c-84c63905f6c5,ResourceVersion:18127,Generation:0,CreationTimestamp:2019-06-20 10:44:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying configmap A again and ensuring the correct watchers observe the notification
+Jun 20 10:44:37.685: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-a,UID:6ea72f8a-a9dc-4397-966c-84c63905f6c5,ResourceVersion:18146,Generation:0,CreationTimestamp:2019-06-20 10:44:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun 20 10:44:37.686: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-a,UID:6ea72f8a-a9dc-4397-966c-84c63905f6c5,ResourceVersion:18146,Generation:0,CreationTimestamp:2019-06-20 10:44:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+STEP: deleting configmap A and ensuring the correct watchers observe the notification
+Jun 20 10:44:47.692: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-a,UID:6ea72f8a-a9dc-4397-966c-84c63905f6c5,ResourceVersion:18168,Generation:0,CreationTimestamp:2019-06-20 10:44:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun 20 10:44:47.692: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-a,UID:6ea72f8a-a9dc-4397-966c-84c63905f6c5,ResourceVersion:18168,Generation:0,CreationTimestamp:2019-06-20 10:44:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+STEP: creating a configmap with label B and ensuring the correct watchers observe the notification
+Jun 20 10:44:57.700: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-b,UID:a1e2174e-4bb7-4826-b4a5-c83553162e30,ResourceVersion:18189,Generation:0,CreationTimestamp:2019-06-20 10:44:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun 20 10:44:57.700: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-b,UID:a1e2174e-4bb7-4826-b4a5-c83553162e30,ResourceVersion:18189,Generation:0,CreationTimestamp:2019-06-20 10:44:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+STEP: deleting configmap B and ensuring the correct watchers observe the notification
+Jun 20 10:45:07.709: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-b,UID:a1e2174e-4bb7-4826-b4a5-c83553162e30,ResourceVersion:18208,Generation:0,CreationTimestamp:2019-06-20 10:44:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun 20 10:45:07.709: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-8098,SelfLink:/api/v1/namespaces/watch-8098/configmaps/e2e-watch-test-configmap-b,UID:a1e2174e-4bb7-4826-b4a5-c83553162e30,ResourceVersion:18208,Generation:0,CreationTimestamp:2019-06-20 10:44:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:45:17.709: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-8098" for this suite.
+Jun 20 10:45:23.724: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:45:23.796: INFO: namespace watch-8098 deletion completed in 6.083093982s
+
+• [SLOW TEST:66.167 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should observe add, update, and delete watch notifications on configmaps [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Update Demo 
+  should create and stop a replication controller  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:45:23.796: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[BeforeEach] [k8s.io] Update Demo
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:273
+[It] should create and stop a replication controller  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating a replication controller
+Jun 20 10:45:23.829: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-9416'
+Jun 20 10:45:24.019: INFO: stderr: ""
+Jun 20 10:45:24.019: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun 20 10:45:24.019: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-9416'
+Jun 20 10:45:24.098: INFO: stderr: ""
+Jun 20 10:45:24.098: INFO: stdout: "update-demo-nautilus-48w9c update-demo-nautilus-7l7kc "
+Jun 20 10:45:24.098: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-48w9c -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-9416'
+Jun 20 10:45:24.163: INFO: stderr: ""
+Jun 20 10:45:24.163: INFO: stdout: ""
+Jun 20 10:45:24.163: INFO: update-demo-nautilus-48w9c is created but not running
+Jun 20 10:45:29.163: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-9416'
+Jun 20 10:45:29.239: INFO: stderr: ""
+Jun 20 10:45:29.239: INFO: stdout: "update-demo-nautilus-48w9c update-demo-nautilus-7l7kc "
+Jun 20 10:45:29.239: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-48w9c -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-9416'
+Jun 20 10:45:29.306: INFO: stderr: ""
+Jun 20 10:45:29.306: INFO: stdout: "true"
+Jun 20 10:45:29.306: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-48w9c -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-9416'
+Jun 20 10:45:29.374: INFO: stderr: ""
+Jun 20 10:45:29.374: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 20 10:45:29.374: INFO: validating pod update-demo-nautilus-48w9c
+Jun 20 10:45:29.380: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 20 10:45:29.380: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 20 10:45:29.380: INFO: update-demo-nautilus-48w9c is verified up and running
+Jun 20 10:45:29.380: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-7l7kc -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-9416'
+Jun 20 10:45:29.447: INFO: stderr: ""
+Jun 20 10:45:29.447: INFO: stdout: "true"
+Jun 20 10:45:29.447: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods update-demo-nautilus-7l7kc -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-9416'
+Jun 20 10:45:29.513: INFO: stderr: ""
+Jun 20 10:45:29.513: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun 20 10:45:29.513: INFO: validating pod update-demo-nautilus-7l7kc
+Jun 20 10:45:29.519: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun 20 10:45:29.519: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun 20 10:45:29.519: INFO: update-demo-nautilus-7l7kc is verified up and running
+STEP: using delete to clean up resources
+Jun 20 10:45:29.519: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete --grace-period=0 --force -f - --namespace=kubectl-9416'
+Jun 20 10:45:29.590: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 20 10:45:29.590: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n"
+Jun 20 10:45:29.590: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-9416'
+Jun 20 10:45:29.671: INFO: stderr: "No resources found.\n"
+Jun 20 10:45:29.671: INFO: stdout: ""
+Jun 20 10:45:29.671: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -l name=update-demo --namespace=kubectl-9416 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun 20 10:45:29.776: INFO: stderr: ""
+Jun 20 10:45:29.776: INFO: stdout: "update-demo-nautilus-48w9c\nupdate-demo-nautilus-7l7kc\n"
+Jun 20 10:45:30.277: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-9416'
+Jun 20 10:45:30.343: INFO: stderr: "No resources found.\n"
+Jun 20 10:45:30.343: INFO: stdout: ""
+Jun 20 10:45:30.343: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -l name=update-demo --namespace=kubectl-9416 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun 20 10:45:30.411: INFO: stderr: ""
+Jun 20 10:45:30.411: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:45:30.411: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-9416" for this suite.
+Jun 20 10:45:52.425: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:45:52.493: INFO: namespace kubectl-9416 deletion completed in 22.078353316s
+
+• [SLOW TEST:28.697 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Update Demo
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should create and stop a replication controller  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Deployment 
+  RollingUpdateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:45:52.494: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:68
+[It] RollingUpdateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 10:45:52.528: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted)
+Jun 20 10:45:52.537: INFO: Pod name sample-pod: Found 0 pods out of 1
+Jun 20 10:45:57.540: INFO: Pod name sample-pod: Found 1 pods out of 1
+STEP: ensuring each pod is running
+Jun 20 10:45:57.540: INFO: Creating deployment "test-rolling-update-deployment"
+Jun 20 10:45:57.545: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has
+Jun 20 10:45:57.552: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created
+Jun 20 10:45:59.558: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected
+Jun 20 10:45:59.564: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624357, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624357, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624357, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624357, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rolling-update-deployment-79f6b9d75c\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:46:01.569: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted)
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:62
+Jun 20 10:46:01.579: INFO: Deployment "test-rolling-update-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment,GenerateName:,Namespace:deployment-8639,SelfLink:/apis/apps/v1/namespaces/deployment-8639/deployments/test-rolling-update-deployment,UID:90bb600b-b707-4ea4-be18-d2d1ec375181,ResourceVersion:18411,Generation:1,CreationTimestamp:2019-06-20 10:45:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%,MaxSurge:25%,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-06-20 10:45:57 +0000 UTC 2019-06-20 10:45:57 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-20 10:45:59 +0000 UTC 2019-06-20 10:45:57 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rolling-update-deployment-79f6b9d75c" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},}
+
+Jun 20 10:46:01.582: INFO: New ReplicaSet "test-rolling-update-deployment-79f6b9d75c" of Deployment "test-rolling-update-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-79f6b9d75c,GenerateName:,Namespace:deployment-8639,SelfLink:/apis/apps/v1/namespaces/deployment-8639/replicasets/test-rolling-update-deployment-79f6b9d75c,UID:1ff1c8c4-3c86-4611-9099-f71dce411c9b,ResourceVersion:18400,Generation:1,CreationTimestamp:2019-06-20 10:45:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 79f6b9d75c,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment 90bb600b-b707-4ea4-be18-d2d1ec375181 0xc000dd9667 0xc000dd9668}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 79f6b9d75c,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 79f6b9d75c,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},}
+Jun 20 10:46:01.582: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment":
+Jun 20 10:46:01.583: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-controller,GenerateName:,Namespace:deployment-8639,SelfLink:/apis/apps/v1/namespaces/deployment-8639/replicasets/test-rolling-update-controller,UID:96d91cc8-9791-4fd7-aebe-cdacd5184c57,ResourceVersion:18410,Generation:2,CreationTimestamp:2019-06-20 10:45:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305832,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment 90bb600b-b707-4ea4-be18-d2d1ec375181 0xc000dd9597 0xc000dd9598}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun 20 10:46:01.587: INFO: Pod "test-rolling-update-deployment-79f6b9d75c-6xwnz" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-79f6b9d75c-6xwnz,GenerateName:test-rolling-update-deployment-79f6b9d75c-,Namespace:deployment-8639,SelfLink:/api/v1/namespaces/deployment-8639/pods/test-rolling-update-deployment-79f6b9d75c-6xwnz,UID:50e2be56-2561-4785-a4b5-9669b9795b5f,ResourceVersion:18399,Generation:0,CreationTimestamp:2019-06-20 10:45:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 79f6b9d75c,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-rolling-update-deployment-79f6b9d75c 1ff1c8c4-3c86-4611-9099-f71dce411c9b 0xc000dd9fb7 0xc000dd9fb8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-8w29l {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-8w29l,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [{default-token-8w29l true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc0003881e0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc000388250}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:45:57 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:45:59 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:45:59 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:45:57 +0000 UTC  }],Message:,Reason:,HostIP:10.100.10.111,PodIP:10.38.0.3,StartTime:2019-06-20 10:45:57 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-06-20 10:45:58 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://6210f4bbfb8f6b4fbc942fba32730b51f913ade32c2c6ecb4aa6bcc49e40e638}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:46:01.587: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-8639" for this suite.
+Jun 20 10:46:07.604: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:46:07.669: INFO: namespace deployment-8639 deletion completed in 6.078152513s
+
+• [SLOW TEST:15.176 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  RollingUpdateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:46:07.670: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating pod busybox-cc4ab9d4-a2cf-43c9-beaa-45ecd06097de in namespace container-probe-8314
+Jun 20 10:46:09.715: INFO: Started pod busybox-cc4ab9d4-a2cf-43c9-beaa-45ecd06097de in namespace container-probe-8314
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun 20 10:46:09.718: INFO: Initial restart count of pod busybox-cc4ab9d4-a2cf-43c9-beaa-45ecd06097de is 0
+Jun 20 10:46:57.801: INFO: Restart count of pod container-probe-8314/busybox-cc4ab9d4-a2cf-43c9-beaa-45ecd06097de is now 1 (48.08315205s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:46:57.811: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-8314" for this suite.
+Jun 20 10:47:03.824: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:47:03.891: INFO: namespace container-probe-8314 deletion completed in 6.076748824s
+
+• [SLOW TEST:56.221 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+[sig-apps] Deployment 
+  deployment should support rollover [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:47:03.891: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:68
+[It] deployment should support rollover [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 10:47:03.929: INFO: Pod name rollover-pod: Found 0 pods out of 1
+Jun 20 10:47:08.933: INFO: Pod name rollover-pod: Found 1 pods out of 1
+STEP: ensuring each pod is running
+Jun 20 10:47:08.933: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready
+Jun 20 10:47:10.936: INFO: Creating deployment "test-rollover-deployment"
+Jun 20 10:47:10.943: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations
+Jun 20 10:47:12.948: INFO: Check revision of new replica set for deployment "test-rollover-deployment"
+Jun 20 10:47:12.953: INFO: Ensure that both replica sets have 1 created replica
+Jun 20 10:47:12.958: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update
+Jun 20 10:47:12.963: INFO: Updating deployment test-rollover-deployment
+Jun 20 10:47:12.964: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller
+Jun 20 10:47:14.971: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2
+Jun 20 10:47:14.976: INFO: Make sure deployment "test-rollover-deployment" is complete
+Jun 20 10:47:14.981: INFO: all replica sets need to contain the pod-template-hash label
+Jun 20 10:47:14.981: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624433, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-854595fc44\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:47:16.987: INFO: all replica sets need to contain the pod-template-hash label
+Jun 20 10:47:16.987: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624435, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-854595fc44\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:47:18.987: INFO: all replica sets need to contain the pod-template-hash label
+Jun 20 10:47:18.987: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624435, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-854595fc44\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:47:20.991: INFO: all replica sets need to contain the pod-template-hash label
+Jun 20 10:47:20.991: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624435, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-854595fc44\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:47:22.987: INFO: all replica sets need to contain the pod-template-hash label
+Jun 20 10:47:22.987: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624435, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-854595fc44\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:47:24.987: INFO: all replica sets need to contain the pod-template-hash label
+Jun 20 10:47:24.987: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624435, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624430, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-854595fc44\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:47:26.987: INFO: 
+Jun 20 10:47:26.987: INFO: Ensure that both old replica sets have no replicas
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:62
+Jun 20 10:47:26.993: INFO: Deployment "test-rollover-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment,GenerateName:,Namespace:deployment-2353,SelfLink:/apis/apps/v1/namespaces/deployment-2353/deployments/test-rollover-deployment,UID:b1d31253-3962-41e5-85ce-17ef6e2b975e,ResourceVersion:18706,Generation:2,CreationTimestamp:2019-06-20 10:47:10 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-06-20 10:47:10 +0000 UTC 2019-06-20 10:47:10 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-20 10:47:25 +0000 UTC 2019-06-20 10:47:10 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rollover-deployment-854595fc44" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},}
+
+Jun 20 10:47:26.996: INFO: New ReplicaSet "test-rollover-deployment-854595fc44" of Deployment "test-rollover-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-854595fc44,GenerateName:,Namespace:deployment-2353,SelfLink:/apis/apps/v1/namespaces/deployment-2353/replicasets/test-rollover-deployment-854595fc44,UID:355679be-076b-4fdf-a56a-d807a3892b55,ResourceVersion:18694,Generation:2,CreationTimestamp:2019-06-20 10:47:12 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 854595fc44,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment b1d31253-3962-41e5-85ce-17ef6e2b975e 0xc0030c0f67 0xc0030c0f68}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 854595fc44,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 854595fc44,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},}
+Jun 20 10:47:26.996: INFO: All old ReplicaSets of Deployment "test-rollover-deployment":
+Jun 20 10:47:26.996: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-controller,GenerateName:,Namespace:deployment-2353,SelfLink:/apis/apps/v1/namespaces/deployment-2353/replicasets/test-rollover-controller,UID:5a55947f-b1ab-4e2d-887f-1f261a859ed3,ResourceVersion:18705,Generation:2,CreationTimestamp:2019-06-20 10:47:03 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment b1d31253-3962-41e5-85ce-17ef6e2b975e 0xc0030c0e87 0xc0030c0e88}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun 20 10:47:26.996: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-9b8b997cf,GenerateName:,Namespace:deployment-2353,SelfLink:/apis/apps/v1/namespaces/deployment-2353/replicasets/test-rollover-deployment-9b8b997cf,UID:ee5f596c-15ab-4d39-a31f-223efb383973,ResourceVersion:18659,Generation:2,CreationTimestamp:2019-06-20 10:47:10 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 9b8b997cf,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment b1d31253-3962-41e5-85ce-17ef6e2b975e 0xc0030c1040 0xc0030c1041}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 9b8b997cf,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 9b8b997cf,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun 20 10:47:26.999: INFO: Pod "test-rollover-deployment-854595fc44-sx4fq" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-854595fc44-sx4fq,GenerateName:test-rollover-deployment-854595fc44-,Namespace:deployment-2353,SelfLink:/api/v1/namespaces/deployment-2353/pods/test-rollover-deployment-854595fc44-sx4fq,UID:c0a05506-572e-4ca2-aecd-8ff17664f89b,ResourceVersion:18673,Generation:0,CreationTimestamp:2019-06-20 10:47:12 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 854595fc44,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-rollover-deployment-854595fc44 355679be-076b-4fdf-a56a-d807a3892b55 0xc0030c1c47 0xc0030c1c48}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-2rxc8 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-2rxc8,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [{default-token-2rxc8 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc0030c1cb0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc0030c1cd0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:47:13 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:47:15 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:47:15 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:47:12 +0000 UTC  }],Message:,Reason:,HostIP:10.100.10.111,PodIP:10.38.0.3,StartTime:2019-06-20 10:47:13 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-06-20 10:47:14 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://4b8d5cd5db6a8cd2be44a1a3878191845165bcf1d86a06494a9d48a1ccbb3768}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:47:26.999: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-2353" for this suite.
+Jun 20 10:47:33.012: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:47:33.084: INFO: namespace deployment-2353 deletion completed in 6.082030642s
+
+• [SLOW TEST:29.193 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  deployment should support rollover [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:47:33.084: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating projection with secret that has name projected-secret-test-1043f72e-bb51-418b-927b-1342d3573c74
+STEP: Creating a pod to test consume secrets
+Jun 20 10:47:33.126: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-c747e9aa-3036-4ea7-93bb-a571c983bed9" in namespace "projected-3588" to be "success or failure"
+Jun 20 10:47:33.129: INFO: Pod "pod-projected-secrets-c747e9aa-3036-4ea7-93bb-a571c983bed9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.840651ms
+Jun 20 10:47:35.133: INFO: Pod "pod-projected-secrets-c747e9aa-3036-4ea7-93bb-a571c983bed9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007423318s
+STEP: Saw pod success
+Jun 20 10:47:35.133: INFO: Pod "pod-projected-secrets-c747e9aa-3036-4ea7-93bb-a571c983bed9" satisfied condition "success or failure"
+Jun 20 10:47:35.136: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-secrets-c747e9aa-3036-4ea7-93bb-a571c983bed9 container projected-secret-volume-test: 
+STEP: delete the pod
+Jun 20 10:47:35.154: INFO: Waiting for pod pod-projected-secrets-c747e9aa-3036-4ea7-93bb-a571c983bed9 to disappear
+Jun 20 10:47:35.158: INFO: Pod pod-projected-secrets-c747e9aa-3036-4ea7-93bb-a571c983bed9 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:47:35.158: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-3588" for this suite.
+Jun 20 10:47:41.173: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:47:41.241: INFO: namespace projected-3588 deletion completed in 6.079871163s
+
+• [SLOW TEST:8.157 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
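+
+The object under test here is a projected volume whose defaultMode overrides the usual 0644. A hand-rolled equivalent is sketched below, assuming a pre-created Secret named my-secret with a data-1 key; 0400 is an arbitrary mode chosen to make the override visible, and busybox stands in for the suite's purpose-built test image.
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: projected-defaultmode-example
+spec:
+  restartPolicy: Never
+  containers:
+  - name: projected-secret-volume-test
+    image: busybox:1.29
+    command: ["sh", "-c", "stat -L -c '%a' /etc/projected/data-1"]  # expect 400
+    volumeMounts:
+    - name: projected-secret-volume
+      mountPath: /etc/projected
+  volumes:
+  - name: projected-secret-volume
+    projected:
+      defaultMode: 0400              # the mode being verified
+      sources:
+      - secret:
+          name: my-secret            # hypothetical Secret holding a data-1 key
+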
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:47:41.242: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir 0777 on node default medium
+Jun 20 10:47:41.289: INFO: Waiting up to 5m0s for pod "pod-93c09b5e-1eb9-4001-b122-e833ae7c6a5e" in namespace "emptydir-9406" to be "success or failure"
+Jun 20 10:47:41.293: INFO: Pod "pod-93c09b5e-1eb9-4001-b122-e833ae7c6a5e": Phase="Pending", Reason="", readiness=false. Elapsed: 4.491492ms
+Jun 20 10:47:43.297: INFO: Pod "pod-93c09b5e-1eb9-4001-b122-e833ae7c6a5e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007659575s
+STEP: Saw pod success
+Jun 20 10:47:43.297: INFO: Pod "pod-93c09b5e-1eb9-4001-b122-e833ae7c6a5e" satisfied condition "success or failure"
+Jun 20 10:47:43.299: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-93c09b5e-1eb9-4001-b122-e833ae7c6a5e container test-container: 
+STEP: delete the pod
+Jun 20 10:47:43.317: INFO: Waiting for pod pod-93c09b5e-1eb9-4001-b122-e833ae7c6a5e to disappear
+Jun 20 10:47:43.319: INFO: Pod pod-93c09b5e-1eb9-4001-b122-e833ae7c6a5e no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:47:43.319: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-9406" for this suite.
+Jun 20 10:47:49.332: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:47:49.399: INFO: namespace emptydir-9406 deletion completed in 6.076426141s
+
+• [SLOW TEST:8.157 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
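+
+(root,0777,default) decodes as: run as root, expect directory mode 0777, use the default (disk-backed) medium. A minimal hand-check, with busybox standing in for the suite's test image:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: emptydir-0777-example
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test-container
+    image: busybox:1.29
+    command: ["sh", "-c", "stat -c '%a' /test-volume"]   # expect 777
+    volumeMounts:
+    - name: test-volume
+      mountPath: /test-volume
+  volumes:
+  - name: test-volume
+    emptyDir: {}        # default medium = node disk; medium: Memory would be tmpfs
+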
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicationController 
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:47:49.399: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename replication-controller
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating replication controller my-hostname-basic-60af33b9-a090-4291-b90a-6114ad36efd8
+Jun 20 10:47:49.455: INFO: Pod name my-hostname-basic-60af33b9-a090-4291-b90a-6114ad36efd8: Found 1 pods out of 1
+Jun 20 10:47:49.455: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-60af33b9-a090-4291-b90a-6114ad36efd8" are running
+Jun 20 10:47:51.462: INFO: Pod "my-hostname-basic-60af33b9-a090-4291-b90a-6114ad36efd8-9dbg7" is running (conditions: [{Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-20 10:47:49 +0000 UTC Reason: Message:}])
+Jun 20 10:47:51.462: INFO: Trying to dial the pod
+Jun 20 10:47:56.474: INFO: Controller my-hostname-basic-60af33b9-a090-4291-b90a-6114ad36efd8: Got expected result from replica 1 [my-hostname-basic-60af33b9-a090-4291-b90a-6114ad36efd8-9dbg7]: "my-hostname-basic-60af33b9-a090-4291-b90a-6114ad36efd8-9dbg7", 1 of 1 required successes so far
+[AfterEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:47:56.474: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replication-controller-8938" for this suite.
+Jun 20 10:48:02.488: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:48:02.556: INFO: namespace replication-controller-8938 deletion completed in 6.078552995s
+
+• [SLOW TEST:13.157 seconds]
+[sig-apps] ReplicationController
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
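+
+The "public image" in this test simply serves the pod's hostname over HTTP, which is what the dial step above asserts. A roughly equivalent controller follows; the serve-hostname image tag and port are assumptions, since the log records neither.
+
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: my-hostname-basic-example
+spec:
+  replicas: 1
+  selector:
+    name: my-hostname-basic-example
+  template:
+    metadata:
+      labels:
+        name: my-hostname-basic-example
+    spec:
+      containers:
+      - name: my-hostname-basic-example
+        image: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1
+        ports:
+        - containerPort: 9376
+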
+SSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:48:02.556: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating the pod
+Jun 20 10:48:05.138: INFO: Successfully updated pod "annotationupdate208eb690-d4c8-467a-9296-28ae9855e3ca"
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:48:07.161: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-7671" for this suite.
+Jun 20 10:48:29.175: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:48:29.244: INFO: namespace projected-7671 deletion completed in 22.079538445s
+
+• [SLOW TEST:26.688 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
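+
+What this test exercises is that downward-API files are live: when the pod's annotations change, the kubelet rewrites the projected file without restarting the container. A sketch of such a pod (the annotation key and paths are illustrative):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: annotationupdate-example
+  annotations:
+    builder: alice                   # the value a later update mutates
+spec:
+  containers:
+  - name: client-container
+    image: busybox:1.29
+    command: ["sh", "-c", "while true; do cat /etc/podinfo/annotations; sleep 5; done"]
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    projected:
+      sources:
+      - downwardAPI:
+          items:
+          - path: annotations
+            fieldRef:
+              fieldPath: metadata.annotations
+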
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] KubeletManagedEtcHosts 
+  should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] KubeletManagedEtcHosts
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:48:29.245: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Setting up the test
+STEP: Creating hostNetwork=false pod
+STEP: Creating hostNetwork=true pod
+STEP: Running the test
+STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false
+Jun 20 10:48:37.325: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-5713 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:48:37.325: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:48:37.434: INFO: Exec stderr: ""
+Jun 20 10:48:37.434: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-5713 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:48:37.434: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:48:37.543: INFO: Exec stderr: ""
+Jun 20 10:48:37.543: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-5713 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:48:37.543: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:48:37.653: INFO: Exec stderr: ""
+Jun 20 10:48:37.653: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-5713 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:48:37.653: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:48:37.774: INFO: Exec stderr: ""
+STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount
+Jun 20 10:48:37.774: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-5713 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:48:37.774: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:48:37.881: INFO: Exec stderr: ""
+Jun 20 10:48:37.881: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-5713 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:48:37.881: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:48:38.064: INFO: Exec stderr: ""
+STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true
+Jun 20 10:48:38.064: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-5713 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:48:38.064: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:48:38.148: INFO: Exec stderr: ""
+Jun 20 10:48:38.148: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-5713 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:48:38.148: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:48:38.225: INFO: Exec stderr: ""
+Jun 20 10:48:38.226: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-5713 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:48:38.226: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:48:38.317: INFO: Exec stderr: ""
+Jun 20 10:48:38.317: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-5713 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:48:38.317: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:48:38.400: INFO: Exec stderr: ""
+[AfterEach] [k8s.io] KubeletManagedEtcHosts
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:48:38.400: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-kubelet-etc-hosts-5713" for this suite.
+Jun 20 10:49:16.415: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:49:16.481: INFO: namespace e2e-kubelet-etc-hosts-5713 deletion completed in 38.077058633s
+
+• [SLOW TEST:47.236 seconds]
+[k8s.io] KubeletManagedEtcHosts
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
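+
+The cases exercised above reduce to three rules: containers without an explicit /etc/hosts mount get the kubelet-managed file, a container that mounts its own /etc/hosts is left alone, and hostNetwork pods keep the node's file. A sketch of a pod covering the first two rules (the hostPath source is a convenient stand-in, not the suite's exact volume):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: etc-hosts-example
+spec:
+  hostNetwork: false                 # set true for the unmanaged hostNetwork variant
+  containers:
+  - name: busybox-1                  # gets the kubelet-managed /etc/hosts
+    image: busybox:1.29
+    command: ["sleep", "3600"]
+  - name: busybox-3                  # opts out by mounting /etc/hosts explicitly
+    image: busybox:1.29
+    command: ["sleep", "3600"]
+    volumeMounts:
+    - name: hosts
+      mountPath: /etc/hosts
+  volumes:
+  - name: hosts
+    hostPath:
+      path: /etc/hosts               # the node's own file
+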
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should have monotonically increasing restart count [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:49:16.482: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should have monotonically increasing restart count [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating pod liveness-359768ed-3b49-4f6d-b55c-30e2c1f869ba in namespace container-probe-8293
+Jun 20 10:49:18.528: INFO: Started pod liveness-359768ed-3b49-4f6d-b55c-30e2c1f869ba in namespace container-probe-8293
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun 20 10:49:18.531: INFO: Initial restart count of pod liveness-359768ed-3b49-4f6d-b55c-30e2c1f869ba is 0
+Jun 20 10:49:32.556: INFO: Restart count of pod container-probe-8293/liveness-359768ed-3b49-4f6d-b55c-30e2c1f869ba is now 1 (14.025630654s elapsed)
+Jun 20 10:49:54.595: INFO: Restart count of pod container-probe-8293/liveness-359768ed-3b49-4f6d-b55c-30e2c1f869ba is now 2 (36.064658088s elapsed)
+Jun 20 10:50:14.630: INFO: Restart count of pod container-probe-8293/liveness-359768ed-3b49-4f6d-b55c-30e2c1f869ba is now 3 (56.099716795s elapsed)
+Jun 20 10:50:34.677: INFO: Restart count of pod container-probe-8293/liveness-359768ed-3b49-4f6d-b55c-30e2c1f869ba is now 4 (1m16.145955297s elapsed)
+Jun 20 10:51:38.787: INFO: Restart count of pod container-probe-8293/liveness-359768ed-3b49-4f6d-b55c-30e2c1f869ba is now 5 (2m20.256140295s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:51:38.797: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-8293" for this suite.
+Jun 20 10:51:44.810: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:51:44.878: INFO: namespace container-probe-8293 deletion completed in 6.077083318s
+
+• [SLOW TEST:148.396 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should have monotonically increasing restart count [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
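+
+The cadence in the log (restart 1 after ~14s, then roughly every 20s, stretching to ~64s by restart 5) is the probe period plus the kubelet's growing crash back-off; the count itself only ever increases. A pod that reproduces a monotonically growing restart count, as a sketch:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: liveness-exec-example
+spec:
+  containers:
+  - name: liveness
+    image: busybox:1.29
+    command: ["sh", "-c", "touch /tmp/healthy; sleep 10; rm -f /tmp/healthy; sleep 600"]
+    livenessProbe:
+      exec:
+        command: ["cat", "/tmp/healthy"]   # fails once /tmp/healthy is removed
+      initialDelaySeconds: 5
+      periodSeconds: 5
+      failureThreshold: 1
+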
+SSSSSS
+------------------------------
+[sig-api-machinery] Watchers 
+  should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:51:44.878: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename watch
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating a watch on configmaps with a certain label
+STEP: creating a new configmap
+STEP: modifying the configmap once
+STEP: changing the label value of the configmap
+STEP: Expecting to observe a delete notification for the watched object
+Jun 20 10:51:44.926: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8434,SelfLink:/api/v1/namespaces/watch-8434/configmaps/e2e-watch-test-label-changed,UID:4c3e2d8b-758e-4386-bcdd-e34e1c0a0468,ResourceVersion:19447,Generation:0,CreationTimestamp:2019-06-20 10:51:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun 20 10:51:44.926: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8434,SelfLink:/api/v1/namespaces/watch-8434/configmaps/e2e-watch-test-label-changed,UID:4c3e2d8b-758e-4386-bcdd-e34e1c0a0468,ResourceVersion:19448,Generation:0,CreationTimestamp:2019-06-20 10:51:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+Jun 20 10:51:44.926: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8434,SelfLink:/api/v1/namespaces/watch-8434/configmaps/e2e-watch-test-label-changed,UID:4c3e2d8b-758e-4386-bcdd-e34e1c0a0468,ResourceVersion:19449,Generation:0,CreationTimestamp:2019-06-20 10:51:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying the configmap a second time
+STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements
+STEP: changing the label value of the configmap back
+STEP: modifying the configmap a third time
+STEP: deleting the configmap
+STEP: Expecting to observe an add notification for the watched object when the label value was restored
+Jun 20 10:51:54.947: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8434,SelfLink:/api/v1/namespaces/watch-8434/configmaps/e2e-watch-test-label-changed,UID:4c3e2d8b-758e-4386-bcdd-e34e1c0a0468,ResourceVersion:19472,Generation:0,CreationTimestamp:2019-06-20 10:51:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun 20 10:51:54.947: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8434,SelfLink:/api/v1/namespaces/watch-8434/configmaps/e2e-watch-test-label-changed,UID:4c3e2d8b-758e-4386-bcdd-e34e1c0a0468,ResourceVersion:19473,Generation:0,CreationTimestamp:2019-06-20 10:51:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},}
+Jun 20 10:51:54.947: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8434,SelfLink:/api/v1/namespaces/watch-8434/configmaps/e2e-watch-test-label-changed,UID:4c3e2d8b-758e-4386-bcdd-e34e1c0a0468,ResourceVersion:19474,Generation:0,CreationTimestamp:2019-06-20 10:51:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:51:54.947: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-8434" for this suite.
+Jun 20 10:52:00.960: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:52:01.035: INFO: namespace watch-8434 deletion completed in 6.084925735s
+
+• [SLOW TEST:16.157 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
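+
+The watched object is an ordinary ConfigMap; only its label decides whether events are delivered. A sketch, with a label-filtered watch shown as a comment:
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: e2e-watch-test-label-changed
+  labels:
+    watch-this-configmap: label-changed-and-restored   # the watch's label selector
+data:
+  mutation: "1"
+# kubectl get configmap -l watch-this-configmap=label-changed-and-restored --watch
+# reports DELETED when the label is changed away and ADDED when it is restored,
+# the same sequence logged above.
+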
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run rc 
+  should create an rc from an image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:52:01.035: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[BeforeEach] [k8s.io] Kubectl run rc
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1457
+[It] should create an rc from an image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun 20 10:52:01.065: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=kubectl-6397'
+Jun 20 10:52:01.386: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+Jun 20 10:52:01.386: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n"
+STEP: verifying the rc e2e-test-nginx-rc was created
+STEP: verifying the pod controlled by rc e2e-test-nginx-rc was created
+STEP: confirm that you can get logs from an rc
+Jun 20 10:52:01.397: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [e2e-test-nginx-rc-vjwk6]
+Jun 20 10:52:01.397: INFO: Waiting up to 5m0s for pod "e2e-test-nginx-rc-vjwk6" in namespace "kubectl-6397" to be "running and ready"
+Jun 20 10:52:01.403: INFO: Pod "e2e-test-nginx-rc-vjwk6": Phase="Pending", Reason="", readiness=false. Elapsed: 6.326291ms
+Jun 20 10:52:03.406: INFO: Pod "e2e-test-nginx-rc-vjwk6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009418856s
+Jun 20 10:52:05.413: INFO: Pod "e2e-test-nginx-rc-vjwk6": Phase="Running", Reason="", readiness=true. Elapsed: 4.01568907s
+Jun 20 10:52:05.413: INFO: Pod "e2e-test-nginx-rc-vjwk6" satisfied condition "running and ready"
+Jun 20 10:52:05.413: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [e2e-test-nginx-rc-vjwk6]
+Jun 20 10:52:05.413: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 logs rc/e2e-test-nginx-rc --namespace=kubectl-6397'
+Jun 20 10:52:05.581: INFO: stderr: ""
+Jun 20 10:52:05.581: INFO: stdout: ""
+[AfterEach] [k8s.io] Kubectl run rc
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1462
+Jun 20 10:52:05.581: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete rc e2e-test-nginx-rc --namespace=kubectl-6397'
+Jun 20 10:52:05.659: INFO: stderr: ""
+Jun 20 10:52:05.659: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:52:05.659: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-6397" for this suite.
+Jun 20 10:52:11.673: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:52:11.738: INFO: namespace kubectl-6397 deletion completed in 6.075459597s
+
+• [SLOW TEST:10.703 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl run rc
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should create an rc from an image  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
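+
+The deprecated run/v1 generator expands to an ordinary ReplicationController, roughly as below (the run label convention is reconstructed from the generator's behavior, not from the log). The empty stdout from the logs command is unsurprising here, since nginx had served no requests yet.
+
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: e2e-test-nginx-rc
+  labels:
+    run: e2e-test-nginx-rc
+spec:
+  replicas: 1
+  selector:
+    run: e2e-test-nginx-rc
+  template:
+    metadata:
+      labels:
+        run: e2e-test-nginx-rc
+    spec:
+      containers:
+      - name: e2e-test-nginx-rc
+        image: docker.io/library/nginx:1.14-alpine
+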
+SSSSSSSSSSSS
+------------------------------
+[sig-auth] ServiceAccounts 
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:52:11.738: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename svcaccounts
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: getting the auto-created API token
+STEP: reading a file in the container
+Jun 20 10:52:14.293: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-3672 pod-service-account-38806c24-e01b-4342-b986-b002f767ded9 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/token'
+STEP: reading a file in the container
+Jun 20 10:52:14.449: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-3672 pod-service-account-38806c24-e01b-4342-b986-b002f767ded9 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
+STEP: reading a file in the container
+Jun 20 10:52:14.603: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-3672 pod-service-account-38806c24-e01b-4342-b986-b002f767ded9 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/namespace'
+[AfterEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:52:14.767: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "svcaccounts-3672" for this suite.
+Jun 20 10:52:20.781: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:52:20.850: INFO: namespace svcaccounts-3672 deletion completed in 6.078752058s
+
+• [SLOW TEST:9.111 seconds]
+[sig-auth] ServiceAccounts
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:23
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
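+
+The three exec commands above read the standard token mount. Any pod running under a service account gets the same files; a minimal sketch with illustrative names:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-service-account-example
+spec:
+  serviceAccountName: default
+  containers:
+  - name: test
+    image: busybox:1.29
+    command: ["sleep", "3600"]
+# kubectl exec pod-service-account-example -c test -- \
+#   ls /var/run/secrets/kubernetes.io/serviceaccount
+# ca.crt  namespace  token
+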
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Watchers 
+  should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:52:20.850: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename watch
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating a watch on configmaps
+STEP: creating a new configmap
+STEP: modifying the configmap once
+STEP: closing the watch once it receives two notifications
+Jun 20 10:52:20.891: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-232,SelfLink:/api/v1/namespaces/watch-232/configmaps/e2e-watch-test-watch-closed,UID:3e5ff537-f9cc-4247-97da-8fb401aa2c3c,ResourceVersion:19590,Generation:0,CreationTimestamp:2019-06-20 10:52:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun 20 10:52:20.891: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-232,SelfLink:/api/v1/namespaces/watch-232/configmaps/e2e-watch-test-watch-closed,UID:3e5ff537-f9cc-4247-97da-8fb401aa2c3c,ResourceVersion:19591,Generation:0,CreationTimestamp:2019-06-20 10:52:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying the configmap a second time, while the watch is closed
+STEP: creating a new watch on configmaps from the last resource version observed by the first watch
+STEP: deleting the configmap
+STEP: Expecting to observe notifications for all changes to the configmap since the first watch closed
+Jun 20 10:52:20.903: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-232,SelfLink:/api/v1/namespaces/watch-232/configmaps/e2e-watch-test-watch-closed,UID:3e5ff537-f9cc-4247-97da-8fb401aa2c3c,ResourceVersion:19592,Generation:0,CreationTimestamp:2019-06-20 10:52:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun 20 10:52:20.903: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-232,SelfLink:/api/v1/namespaces/watch-232/configmaps/e2e-watch-test-watch-closed,UID:3e5ff537-f9cc-4247-97da-8fb401aa2c3c,ResourceVersion:19593,Generation:0,CreationTimestamp:2019-06-20 10:52:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:52:20.903: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-232" for this suite.
+Jun 20 10:52:26.928: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:52:27.141: INFO: namespace watch-232 deletion completed in 6.234238531s
+
+• [SLOW TEST:6.291 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
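+
+Resuming is a matter of opening the new watch with the last resourceVersion seen (19591 above); the API server then replays every later event. A sketch of the object, with the raw API call involved as a comment:
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: e2e-watch-test-watch-closed
+  labels:
+    watch-this-configmap: watch-closed-and-restarted
+data:
+  mutation: "1"
+# GET /api/v1/namespaces/watch-232/configmaps?watch=true&resourceVersion=19591
+#     &fieldSelector=metadata.name=e2e-watch-test-watch-closed
+# delivers the MODIFIED (mutation: 2) and DELETED events logged above.
+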
+SSSSSSS
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for intra-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:52:27.141: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for intra-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Performing setup for networking test in namespace pod-network-test-6015
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun 20 10:52:27.241: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun 20 10:52:53.319: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.38.0.3:8080/dial?request=hostName&protocol=udp&host=10.38.0.2&port=8081&tries=1'] Namespace:pod-network-test-6015 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:52:53.319: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:52:53.409: INFO: Waiting for endpoints: map[]
+Jun 20 10:52:53.412: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.38.0.3:8080/dial?request=hostName&protocol=udp&host=10.34.0.2&port=8081&tries=1'] Namespace:pod-network-test-6015 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 10:52:53.412: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 10:52:53.501: INFO: Waiting for endpoints: map[]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:52:53.501: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-6015" for this suite.
+Jun 20 10:53:15.514: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:53:15.582: INFO: namespace pod-network-test-6015 deletion completed in 22.077769903s
+
+• [SLOW TEST:48.441 seconds]
+[sig-network] Networking
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for intra-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
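+
+Each endpoint pod in this check runs a small HTTP-plus-UDP responder; the curl above asks one pod's HTTP side (port 8080) to dial another pod's UDP side (port 8081). A sketch of such an endpoint pod; the netexec image tag and flags are assumptions, since the log records neither.
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: netserver-example
+spec:
+  containers:
+  - name: webserver
+    image: gcr.io/kubernetes-e2e-test-images/netexec:1.1
+    args: ["--http-port=8080", "--udp-port=8081"]
+    ports:
+    - containerPort: 8080
+    - containerPort: 8081
+      protocol: UDP
+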
+SSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod 
+  should have an terminated reason [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:53:15.582: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[BeforeEach] when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81
+[It] should have an terminated reason [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:53:19.634: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-7059" for this suite.
+Jun 20 10:53:25.648: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:53:25.716: INFO: namespace kubelet-test-7059 deletion completed in 6.079406877s
+
+• [SLOW TEST:10.134 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78
+    should have an terminated reason [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
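+
+"Terminated reason" refers to status.containerStatuses[].state.terminated.reason. A container that always fails makes it observable; a minimal sketch with illustrative names:
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: bin-false-example
+spec:
+  restartPolicy: Never
+  containers:
+  - name: bin-false
+    image: busybox:1.29
+    command: ["/bin/false"]          # exits 1 immediately
+# kubectl get pod bin-false-example \
+#   -o jsonpath='{.status.containerStatuses[0].state.terminated.reason}'
+# prints "Error".
+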
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] DNS 
+  should provide DNS for ExternalName services [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] DNS
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:53:25.717: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename dns
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide DNS for ExternalName services [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a test externalName service
+STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-4353.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local; sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-4353.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local; sleep 1; done
+
+STEP: creating a pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Jun 20 10:53:29.781: INFO: DNS probes using dns-test-cc41ab2e-37b4-48f6-bc8c-da4fb68f1aa1 succeeded
+
+STEP: deleting the pod
+STEP: changing the externalName to bar.example.com
+STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-4353.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local; sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-4353.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local; sleep 1; done
+
+STEP: creating a second pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Jun 20 10:53:33.822: INFO: File wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local from pod  dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Jun 20 10:53:33.826: INFO: File jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local from pod  dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Jun 20 10:53:33.826: INFO: Lookups using dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 failed for: [wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local]
+
+Jun 20 10:53:38.831: INFO: File wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local from pod  dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Jun 20 10:53:38.835: INFO: File jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local from pod  dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Jun 20 10:53:38.835: INFO: Lookups using dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 failed for: [wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local]
+
+Jun 20 10:53:43.831: INFO: File wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local from pod  dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Jun 20 10:53:43.835: INFO: File jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local from pod  dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Jun 20 10:53:43.835: INFO: Lookups using dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 failed for: [wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local]
+
+Jun 20 10:53:48.831: INFO: File wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local from pod  dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Jun 20 10:53:48.836: INFO: File jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local from pod  dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Jun 20 10:53:48.836: INFO: Lookups using dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 failed for: [wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local]
+
+Jun 20 10:53:53.831: INFO: File wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local from pod  dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Jun 20 10:53:53.835: INFO: File jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local from pod  dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Jun 20 10:53:53.835: INFO: Lookups using dns-4353/dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 failed for: [wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local]
+
+Jun 20 10:53:58.835: INFO: DNS probes using dns-test-437103fa-dcfb-4972-857b-ffcb37238d60 succeeded
+
+STEP: deleting the pod
+STEP: changing the service to type=ClusterIP
+STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-4353.svc.cluster.local A > /results/wheezy_udp@dns-test-service-3.dns-4353.svc.cluster.local; sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-4353.svc.cluster.local A > /results/jessie_udp@dns-test-service-3.dns-4353.svc.cluster.local; sleep 1; done
+
+STEP: creating a third pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Jun 20 10:54:00.901: INFO: DNS probes using dns-test-631154d9-864f-4dc3-9fee-55cb2a1d86a8 succeeded
+
+STEP: deleting the pod
+STEP: deleting the test externalName service
+[AfterEach] [sig-network] DNS
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:54:00.952: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "dns-4353" for this suite.
+Jun 20 10:54:06.969: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:54:07.043: INFO: namespace dns-4353 deletion completed in 6.08680348s
+
+• [SLOW TEST:41.327 seconds]
+[sig-network] DNS
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
+  should provide DNS for ExternalName services [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
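+
+The service under test is nothing more than a CNAME published through cluster DNS; the wheezy and jessie pods just run the dig loops shown above from two different base images. A sketch:
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: dns-test-service-3
+spec:
+  type: ExternalName
+  externalName: foo.example.com      # later patched to bar.example.com, per the log
+# While the type is ExternalName, dig ... CNAME returns the target; after the switch
+# to type: ClusterIP the same service name answers with an A record instead.
+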
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:54:07.044: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:164
+[It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 10:54:07.074: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:54:09.110: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-3511" for this suite.
+Jun 20 10:55:01.124: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:55:01.238: INFO: namespace pods-3511 deletion completed in 52.124293246s
+
+• [SLOW TEST:54.194 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
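+
+kubectl fetches logs over plain HTTP, but the same endpoint accepts a websocket upgrade, which is what this test drives directly. A pod for trying it, with the raw endpoint as a comment (names are illustrative):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-logs-websocket-example
+spec:
+  containers:
+  - name: main
+    image: busybox:1.29
+    command: ["sh", "-c", "echo container is alive; sleep 600"]
+# The log endpoint, websocket-upgraded by the test client:
+#   GET /api/v1/namespaces/<ns>/pods/pod-logs-websocket-example/log?follow=true
+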
+SSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:55:01.238: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:55:01.323: INFO: Waiting up to 5m0s for pod "downwardapi-volume-7b392ad1-aab0-4732-a026-f959c7dc2bf7" in namespace "projected-5242" to be "success or failure"
+Jun 20 10:55:01.326: INFO: Pod "downwardapi-volume-7b392ad1-aab0-4732-a026-f959c7dc2bf7": Phase="Pending", Reason="", readiness=false. Elapsed: 3.327977ms
+Jun 20 10:55:03.333: INFO: Pod "downwardapi-volume-7b392ad1-aab0-4732-a026-f959c7dc2bf7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.010652037s
+Jun 20 10:55:05.337: INFO: Pod "downwardapi-volume-7b392ad1-aab0-4732-a026-f959c7dc2bf7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013915141s
+STEP: Saw pod success
+Jun 20 10:55:05.337: INFO: Pod "downwardapi-volume-7b392ad1-aab0-4732-a026-f959c7dc2bf7" satisfied condition "success or failure"
+Jun 20 10:55:05.339: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-7b392ad1-aab0-4732-a026-f959c7dc2bf7 container client-container: 
+STEP: delete the pod
+Jun 20 10:55:05.354: INFO: Waiting for pod downwardapi-volume-7b392ad1-aab0-4732-a026-f959c7dc2bf7 to disappear
+Jun 20 10:55:05.357: INFO: Pod downwardapi-volume-7b392ad1-aab0-4732-a026-f959c7dc2bf7 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:55:05.357: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-5242" for this suite.
+Jun 20 10:55:11.369: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:55:11.433: INFO: namespace projected-5242 deletion completed in 6.073088441s
+
+• [SLOW TEST:10.195 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
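+To see the behaviour this test asserts by hand: a projected downwardAPI volume asking for `limits.memory` on a container with no memory limit set falls back to the node's allocatable memory. A minimal sketch, all names illustrative:
+
+```sh
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downward-memlimit-demo   # illustrative, not the generated fixture name
+spec:
+  restartPolicy: Never
+  containers:
+  - name: client-container
+    image: busybox
+    command: ["sh", "-c", "cat /etc/podinfo/memory_limit"]
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    projected:
+      sources:
+      - downwardAPI:
+          items:
+          - path: memory_limit
+            resourceFieldRef:
+              containerName: client-container
+              resource: limits.memory
+EOF
+kubectl logs downward-memlimit-demo   # node allocatable memory, in bytes
+```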
+SSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] [sig-node] Pods Extended [k8s.io] Delete Grace Period 
+  should be submitted and removed [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] [sig-node] Pods Extended
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:55:11.433: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Delete Grace Period
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pods.go:47
+[It] should be submitted and removed [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating the pod
+STEP: setting up selector
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+Jun 20 10:55:13.489: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-878248618 proxy -p 0'
+STEP: deleting the pod gracefully
+STEP: verifying the kubelet observed the termination notice
+Jun 20 10:55:18.554: INFO: no pod exists with the name we were looking for, assuming the termination request was observed and completed
+[AfterEach] [k8s.io] [sig-node] Pods Extended
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:55:18.557: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-9654" for this suite.
+Jun 20 10:55:24.570: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:55:24.638: INFO: namespace pods-9654 deletion completed in 6.077978053s
+
+• [SLOW TEST:13.205 seconds]
+[k8s.io] [sig-node] Pods Extended
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  [k8s.io] Delete Grace Period
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should be submitted and removed [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
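+The grace-period check above can be reproduced by hand: delete a pod with an explicit grace period and watch the API record it before the object disappears. Names below are illustrative:
+
+```sh
+kubectl run grace-demo --image=busybox --restart=Never -- sleep 3600
+kubectl delete pod grace-demo --grace-period=30 --wait=false
+# While terminating, the server stamps the grace period onto the object:
+kubectl get pod grace-demo -o jsonpath='{.metadata.deletionGracePeriodSeconds}'
+```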
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] ConfigMap 
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-node] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:55:24.638: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap configmap-1846/configmap-test-88b6b39e-84e6-444c-9cf5-2a56bd128c08
+STEP: Creating a pod to test consume configMaps
+Jun 20 10:55:24.690: INFO: Waiting up to 5m0s for pod "pod-configmaps-ebb8a76f-ad44-4cb7-b98b-3b1d2b8ce4b7" in namespace "configmap-1846" to be "success or failure"
+Jun 20 10:55:24.694: INFO: Pod "pod-configmaps-ebb8a76f-ad44-4cb7-b98b-3b1d2b8ce4b7": Phase="Pending", Reason="", readiness=false. Elapsed: 3.507706ms
+Jun 20 10:55:26.697: INFO: Pod "pod-configmaps-ebb8a76f-ad44-4cb7-b98b-3b1d2b8ce4b7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006609995s
+STEP: Saw pod success
+Jun 20 10:55:26.697: INFO: Pod "pod-configmaps-ebb8a76f-ad44-4cb7-b98b-3b1d2b8ce4b7" satisfied condition "success or failure"
+Jun 20 10:55:26.699: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-configmaps-ebb8a76f-ad44-4cb7-b98b-3b1d2b8ce4b7 container env-test: 
+STEP: delete the pod
+Jun 20 10:55:26.716: INFO: Waiting for pod pod-configmaps-ebb8a76f-ad44-4cb7-b98b-3b1d2b8ce4b7 to disappear
+Jun 20 10:55:26.719: INFO: Pod pod-configmaps-ebb8a76f-ad44-4cb7-b98b-3b1d2b8ce4b7 no longer exists
+[AfterEach] [sig-node] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:55:26.719: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-1846" for this suite.
+Jun 20 10:55:32.732: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:55:32.803: INFO: namespace configmap-1846 deletion completed in 6.081081158s
+
+• [SLOW TEST:8.164 seconds]
+[sig-node] ConfigMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:31
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
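+A hand-rolled version of the ConfigMap-as-environment check, with illustrative names and keys:
+
+```sh
+kubectl create configmap env-demo --from-literal=data-1=value-1
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: configmap-env-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: env-test
+    image: busybox
+    command: ["env"]
+    env:
+    - name: CONFIG_DATA_1
+      valueFrom:
+        configMapKeyRef:
+          name: env-demo
+          key: data-1
+EOF
+kubectl logs configmap-env-demo | grep CONFIG_DATA_1   # -> CONFIG_DATA_1=value-1
+```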
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicationController 
+  should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:55:32.804: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename replication-controller
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Given a Pod with a 'name' label pod-adoption is created
+STEP: When a replication controller with a matching selector is created
+STEP: Then the orphan pod is adopted
+[AfterEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:55:35.870: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replication-controller-7902" for this suite.
+Jun 20 10:55:57.882: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:55:57.958: INFO: namespace replication-controller-7902 deletion completed in 22.085688347s
+
+• [SLOW TEST:25.154 seconds]
+[sig-apps] ReplicationController
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
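+The adoption sequence in the STEPs above boils down to: create a bare pod carrying a `name` label, then a replication controller whose selector matches it; the controller takes ownership of the orphan instead of spawning a second replica. Sketch with illustrative names and image:
+
+```sh
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-adoption-demo
+  labels:
+    name: pod-adoption-demo
+spec:
+  containers:
+  - name: main
+    image: nginx
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: pod-adoption-demo
+spec:
+  replicas: 1
+  selector:
+    name: pod-adoption-demo
+  template:
+    metadata:
+      labels:
+        name: pod-adoption-demo
+    spec:
+      containers:
+      - name: main
+        image: nginx
+EOF
+# Adoption is visible as an ownerReference stamped onto the pre-existing pod:
+kubectl get pod pod-adoption-demo -o jsonpath='{.metadata.ownerReferences[0].kind}'
+```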
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:55:57.958: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:55:58.002: INFO: Waiting up to 5m0s for pod "downwardapi-volume-a52574d1-9de8-4270-80ec-c20260b265a2" in namespace "downward-api-7588" to be "success or failure"
+Jun 20 10:55:58.006: INFO: Pod "downwardapi-volume-a52574d1-9de8-4270-80ec-c20260b265a2": Phase="Pending", Reason="", readiness=false. Elapsed: 3.880596ms
+Jun 20 10:56:00.010: INFO: Pod "downwardapi-volume-a52574d1-9de8-4270-80ec-c20260b265a2": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007654747s
+STEP: Saw pod success
+Jun 20 10:56:00.010: INFO: Pod "downwardapi-volume-a52574d1-9de8-4270-80ec-c20260b265a2" satisfied condition "success or failure"
+Jun 20 10:56:00.013: INFO: Trying to get logs from node ip-10-100-12-226.eu-west-1.compute.internal pod downwardapi-volume-a52574d1-9de8-4270-80ec-c20260b265a2 container client-container: 
+STEP: delete the pod
+Jun 20 10:56:00.037: INFO: Waiting for pod downwardapi-volume-a52574d1-9de8-4270-80ec-c20260b265a2 to disappear
+Jun 20 10:56:00.040: INFO: Pod downwardapi-volume-a52574d1-9de8-4270-80ec-c20260b265a2 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:56:00.040: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-7588" for this suite.
+Jun 20 10:56:06.058: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:56:06.136: INFO: namespace downward-api-7588 deletion completed in 6.088499459s
+
+• [SLOW TEST:8.178 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
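+By hand, the cpu-request variant of the downward API volume looks like the sketch below; note that the default divisor rounds CPU up to whole cores, so a 250m request reads back as "1". Names are illustrative:
+
+```sh
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downward-cpu-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: client-container
+    image: busybox
+    command: ["sh", "-c", "cat /etc/podinfo/cpu_request"]
+    resources:
+      requests:
+        cpu: 250m
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    downwardAPI:
+      items:
+      - path: cpu_request
+        resourceFieldRef:
+          containerName: client-container
+          resource: requests.cpu
+EOF
+kubectl logs downward-cpu-demo   # -> 1 (250m rounded up under the default divisor)
+```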
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:56:06.136: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward api env vars
+Jun 20 10:56:06.185: INFO: Waiting up to 5m0s for pod "downward-api-ae95a252-3da5-4f9b-b49a-a9ec29453229" in namespace "downward-api-4853" to be "success or failure"
+Jun 20 10:56:06.188: INFO: Pod "downward-api-ae95a252-3da5-4f9b-b49a-a9ec29453229": Phase="Pending", Reason="", readiness=false. Elapsed: 2.786617ms
+Jun 20 10:56:08.191: INFO: Pod "downward-api-ae95a252-3da5-4f9b-b49a-a9ec29453229": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.005847731s
+STEP: Saw pod success
+Jun 20 10:56:08.191: INFO: Pod "downward-api-ae95a252-3da5-4f9b-b49a-a9ec29453229" satisfied condition "success or failure"
+Jun 20 10:56:08.193: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downward-api-ae95a252-3da5-4f9b-b49a-a9ec29453229 container dapi-container: 
+STEP: delete the pod
+Jun 20 10:56:08.210: INFO: Waiting for pod downward-api-ae95a252-3da5-4f9b-b49a-a9ec29453229 to disappear
+Jun 20 10:56:08.213: INFO: Pod downward-api-ae95a252-3da5-4f9b-b49a-a9ec29453229 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:56:08.213: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-4853" for this suite.
+Jun 20 10:56:14.233: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:56:14.305: INFO: namespace downward-api-4853 deletion completed in 6.083079562s
+
+• [SLOW TEST:8.169 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:32
+  should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
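+The env-var flavour of the downward API exercised above reduces to a `fieldRef` on `metadata.uid`. Minimal sketch, illustrative names:
+
+```sh
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downward-uid-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: dapi-container
+    image: busybox
+    command: ["sh", "-c", "echo POD_UID=$POD_UID"]
+    env:
+    - name: POD_UID
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.uid
+EOF
+kubectl logs downward-uid-demo
+```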
+SSSSSSSSSSS
+------------------------------
+[k8s.io] Variable Expansion 
+  should allow substituting values in a container's args [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:56:14.305: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename var-expansion
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow substituting values in a container's args [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test substitution in container's args
+Jun 20 10:56:14.347: INFO: Waiting up to 5m0s for pod "var-expansion-47b54e75-1fa9-472b-b61c-be36c2074bdf" in namespace "var-expansion-7642" to be "success or failure"
+Jun 20 10:56:14.351: INFO: Pod "var-expansion-47b54e75-1fa9-472b-b61c-be36c2074bdf": Phase="Pending", Reason="", readiness=false. Elapsed: 3.230618ms
+Jun 20 10:56:16.354: INFO: Pod "var-expansion-47b54e75-1fa9-472b-b61c-be36c2074bdf": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006484175s
+STEP: Saw pod success
+Jun 20 10:56:16.354: INFO: Pod "var-expansion-47b54e75-1fa9-472b-b61c-be36c2074bdf" satisfied condition "success or failure"
+Jun 20 10:56:16.357: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod var-expansion-47b54e75-1fa9-472b-b61c-be36c2074bdf container dapi-container: 
+STEP: delete the pod
+Jun 20 10:56:16.373: INFO: Waiting for pod var-expansion-47b54e75-1fa9-472b-b61c-be36c2074bdf to disappear
+Jun 20 10:56:16.376: INFO: Pod var-expansion-47b54e75-1fa9-472b-b61c-be36c2074bdf no longer exists
+[AfterEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:56:16.376: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "var-expansion-7642" for this suite.
+Jun 20 10:56:22.390: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:56:22.454: INFO: namespace var-expansion-7642 deletion completed in 6.074852472s
+
+• [SLOW TEST:8.149 seconds]
+[k8s.io] Variable Expansion
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should allow substituting values in a container's args [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
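+Substitution in args uses the kubelet's `$(VAR)` syntax, expanded before the container's shell ever runs. Sketch with illustrative names:
+
+```sh
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: var-expansion-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: dapi-container
+    image: busybox
+    command: ["sh", "-c"]
+    args: ["echo greeting is $(GREETING)"]   # $(GREETING) expanded by the kubelet
+    env:
+    - name: GREETING
+      value: hello
+EOF
+kubectl logs var-expansion-demo   # -> greeting is hello
+```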
+S
+------------------------------
+[sig-api-machinery] Aggregator 
+  Should be able to support the 1.10 Sample API Server using the current Aggregator [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:56:22.454: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename aggregator
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:76
+Jun 20 10:56:22.485: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+[It] Should be able to support the 1.10 Sample API Server using the current Aggregator [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Registering the sample API server.
+Jun 20 10:56:22.996: INFO: deployment "sample-apiserver-deployment" doesn't have the required revision set
+Jun 20 10:56:25.034: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624982, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-7c4bdb86cc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:56:27.037: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624982, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-7c4bdb86cc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:56:29.037: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624982, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-7c4bdb86cc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:56:31.037: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624982, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-7c4bdb86cc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:56:33.037: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624983, loc:(*time.Location)(0x80bb5c0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63696624982, loc:(*time.Location)(0x80bb5c0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-7c4bdb86cc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun 20 10:56:35.865: INFO: Waited 822.637009ms for the sample-apiserver to be ready to handle requests.
+[AfterEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:67
+[AfterEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:56:36.272: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "aggregator-7278" for this suite.
+Jun 20 10:56:42.415: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:56:42.481: INFO: namespace aggregator-7278 deletion completed in 6.16425565s
+
+• [SLOW TEST:20.027 seconds]
+[sig-api-machinery] Aggregator
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  Should be able to support the 1.10 Sample API Server using the current Aggregator [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
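+Registration of the sample API server hinges on an APIService object pointing the aggregator at an in-cluster Service. A sketch of just that object, assuming the wardle.k8s.io group used by the upstream sample-apiserver of this era and a hypothetical backing Service named sample-api (the Deployment, Service, and RBAC the test also creates are omitted):
+
+```sh
+kubectl apply -f - <<'EOF'
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+  name: v1alpha1.wardle.k8s.io
+spec:
+  group: wardle.k8s.io
+  version: v1alpha1
+  groupPriorityMinimum: 2000
+  versionPriority: 200
+  insecureSkipTLSVerify: true
+  service:
+    name: sample-api        # hypothetical; must exist with a serving backend
+    namespace: default
+EOF
+kubectl get apiservice v1alpha1.wardle.k8s.io   # Available=True once the backend is up
+```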
+SSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox Pod with hostAliases 
+  should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:56:42.481: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:56:46.536: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-8892" for this suite.
+Jun 20 10:57:28.549: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:57:28.619: INFO: namespace kubelet-test-8892 deletion completed in 42.079905782s
+
+• [SLOW TEST:46.138 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  when scheduling a busybox Pod with hostAliases
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:136
+    should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
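+The hostAliases check is easy to replay: entries from `spec.hostAliases` land in a kubelet-managed block of the container's /etc/hosts. Illustrative sketch:
+
+```sh
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: hostaliases-demo
+spec:
+  restartPolicy: Never
+  hostAliases:
+  - ip: "127.0.0.1"
+    hostnames:
+    - foo.local
+    - bar.local
+  containers:
+  - name: main
+    image: busybox
+    command: ["cat", "/etc/hosts"]
+EOF
+kubectl logs hostaliases-demo   # foo.local / bar.local appear against 127.0.0.1
+```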
+SS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run --rm job 
+  should create a job from an image, then delete the job  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:57:28.619: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[It] should create a job from an image, then delete the job  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: executing a command with run --rm and attach with stdin
+Jun 20 10:57:28.652: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 --namespace=kubectl-6818 run e2e-test-rm-busybox-job --image=docker.io/library/busybox:1.29 --rm=true --generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed''
+Jun 20 10:57:30.281: INFO: stderr: "kubectl run --generator=job/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\nIf you don't see a command prompt, try pressing enter.\n"
+Jun 20 10:57:30.281: INFO: stdout: "abcd1234stdin closed\njob.batch \"e2e-test-rm-busybox-job\" deleted\n"
+STEP: verifying the job e2e-test-rm-busybox-job was deleted
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:57:32.287: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-6818" for this suite.
+Jun 20 10:57:38.299: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:57:38.370: INFO: namespace kubectl-6818 deletion completed in 6.079993477s
+
+• [SLOW TEST:9.751 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl run --rm job
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should create a job from an image, then delete the job  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
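+The exact command the test ran is captured above; a trimmed re-run looks like this (the job/v1 generator is deprecated, as the logged stderr notes; names are illustrative):
+
+```sh
+echo abcd1234 | kubectl run e2e-rm-demo --image=busybox:1.29 \
+  --rm --generator=job/v1 --restart=OnFailure --attach --stdin \
+  -- sh -c "cat && echo 'stdin closed'"
+# The piped input is echoed back, then --rm deletes the Job on detach:
+kubectl get job e2e-rm-demo   # -> NotFound once cleanup has completed
+```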
+SSSSSSSS
+------------------------------
+[sig-network] Proxy version v1 
+  should proxy through a service and a pod  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] version v1
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:57:38.370: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename proxy
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should proxy through a service and a pod  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: starting an echo server on multiple ports
+STEP: creating replication controller proxy-service-np785 in namespace proxy-3680
+I0620 10:57:38.422409      15 runners.go:180] Created replication controller with name: proxy-service-np785, namespace: proxy-3680, replica count: 1
+I0620 10:57:39.473008      15 runners.go:180] proxy-service-np785 Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0620 10:57:40.473196      15 runners.go:180] proxy-service-np785 Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0620 10:57:41.473452      15 runners.go:180] proxy-service-np785 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0620 10:57:42.473716      15 runners.go:180] proxy-service-np785 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0620 10:57:43.473920      15 runners.go:180] proxy-service-np785 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0620 10:57:44.474112      15 runners.go:180] proxy-service-np785 Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+Jun 20 10:57:44.477: INFO: setup took 6.07312543s, starting test cases
+STEP: running 16 cases, 20 attempts per case, 320 total attempts
+Jun 20 10:57:44.492: INFO: (0) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 14.763481ms)
+Jun 20 10:57:44.492: INFO: (0) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 15.006764ms)
+Jun 20 10:57:44.492: INFO: (0) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 15.278059ms)
+Jun 20 10:57:44.493: INFO: (0) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 16.299839ms)
+Jun 20 10:57:44.493: INFO: (0) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 15.772181ms)
+Jun 20 10:57:44.500: INFO: (0) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 23.469473ms)
+Jun 20 10:57:44.501: INFO: (0) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 23.691434ms)
+Jun 20 10:57:44.501: INFO: (0) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 23.907297ms)
+Jun 20 10:57:44.505: INFO: (0) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 28.19318ms)
+Jun 20 10:57:44.505: INFO: (0) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 28.493445ms)
+Jun 20 10:57:44.506: INFO: (0) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 29.596158ms)
+Jun 20 10:57:44.508: INFO: (0) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: ... (200; 13.36932ms)
+Jun 20 10:57:44.534: INFO: (1) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 13.110117ms)
+Jun 20 10:57:44.535: INFO: (1) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 13.777338ms)
+Jun 20 10:57:44.535: INFO: (1) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 13.575906ms)
+Jun 20 10:57:44.535: INFO: (1) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 13.815463ms)
+Jun 20 10:57:44.535: INFO: (1) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 14.265335ms)
+Jun 20 10:57:44.537: INFO: (1) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 15.252492ms)
+Jun 20 10:57:44.537: INFO: (1) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 15.296986ms)
+Jun 20 10:57:44.537: INFO: (1) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 16.189149ms)
+Jun 20 10:57:44.552: INFO: (1) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 30.936143ms)
+Jun 20 10:57:44.553: INFO: (1) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 32.054255ms)
+Jun 20 10:57:44.553: INFO: (1) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 31.940939ms)
+Jun 20 10:57:44.554: INFO: (1) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 32.100072ms)
+Jun 20 10:57:44.561: INFO: (2) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 6.705151ms)
+Jun 20 10:57:44.565: INFO: (2) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 10.478127ms)
+Jun 20 10:57:44.565: INFO: (2) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 10.761352ms)
+Jun 20 10:57:44.565: INFO: (2) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 11.044254ms)
+Jun 20 10:57:44.565: INFO: (2) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 11.55804ms)
+Jun 20 10:57:44.565: INFO: (2) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 11.159655ms)
+Jun 20 10:57:44.566: INFO: (2) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 11.266445ms)
+Jun 20 10:57:44.566: INFO: (2) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 11.715299ms)
+Jun 20 10:57:44.566: INFO: (2) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 11.510501ms)
+Jun 20 10:57:44.566: INFO: (2) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test<... (200; 8.546455ms)
+Jun 20 10:57:44.580: INFO: (3) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: ... (200; 10.782117ms)
+Jun 20 10:57:44.580: INFO: (3) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 10.354585ms)
+Jun 20 10:57:44.580: INFO: (3) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 10.732509ms)
+Jun 20 10:57:44.580: INFO: (3) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 10.621838ms)
+Jun 20 10:57:44.580: INFO: (3) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 10.739638ms)
+Jun 20 10:57:44.580: INFO: (3) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 10.849923ms)
+Jun 20 10:57:44.581: INFO: (3) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 10.963098ms)
+Jun 20 10:57:44.581: INFO: (3) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 11.008491ms)
+Jun 20 10:57:44.581: INFO: (3) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 11.592983ms)
+Jun 20 10:57:44.582: INFO: (3) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 11.81508ms)
+Jun 20 10:57:44.582: INFO: (3) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 12.370846ms)
+Jun 20 10:57:44.582: INFO: (3) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 12.59822ms)
+Jun 20 10:57:44.582: INFO: (3) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 13.162374ms)
+Jun 20 10:57:44.595: INFO: (4) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 12.875617ms)
+Jun 20 10:57:44.596: INFO: (4) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test<... (200; 13.791031ms)
+Jun 20 10:57:44.597: INFO: (4) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 13.979253ms)
+Jun 20 10:57:44.597: INFO: (4) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 14.610225ms)
+Jun 20 10:57:44.597: INFO: (4) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 14.402905ms)
+Jun 20 10:57:44.598: INFO: (4) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 15.029859ms)
+Jun 20 10:57:44.598: INFO: (4) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 15.01402ms)
+Jun 20 10:57:44.598: INFO: (4) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 15.115605ms)
+Jun 20 10:57:44.598: INFO: (4) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 15.412729ms)
+Jun 20 10:57:44.598: INFO: (4) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 15.623043ms)
+Jun 20 10:57:44.616: INFO: (4) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 32.825303ms)
+Jun 20 10:57:44.616: INFO: (4) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 32.931504ms)
+Jun 20 10:57:44.616: INFO: (4) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 32.802419ms)
+Jun 20 10:57:44.616: INFO: (4) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 33.071752ms)
+Jun 20 10:57:44.625: INFO: (5) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test<... (200; 9.249722ms)
+Jun 20 10:57:44.626: INFO: (5) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 9.192179ms)
+Jun 20 10:57:44.626: INFO: (5) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 9.461082ms)
+Jun 20 10:57:44.627: INFO: (5) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 11.144321ms)
+Jun 20 10:57:44.628: INFO: (5) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 11.691972ms)
+Jun 20 10:57:44.629: INFO: (5) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 12.390136ms)
+Jun 20 10:57:44.629: INFO: (5) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 12.706047ms)
+Jun 20 10:57:44.629: INFO: (5) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 13.169721ms)
+Jun 20 10:57:44.630: INFO: (5) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 13.116341ms)
+Jun 20 10:57:44.632: INFO: (5) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 15.494855ms)
+Jun 20 10:57:44.632: INFO: (5) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 15.740164ms)
+Jun 20 10:57:44.632: INFO: (5) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 15.841017ms)
+Jun 20 10:57:44.633: INFO: (5) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 16.001483ms)
+Jun 20 10:57:44.633: INFO: (5) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 16.254843ms)
+Jun 20 10:57:44.633: INFO: (5) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 16.095075ms)
+Jun 20 10:57:44.660: INFO: (6) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 26.742708ms)
+Jun 20 10:57:44.660: INFO: (6) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 27.59643ms)
+Jun 20 10:57:44.661: INFO: (6) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 27.463258ms)
+Jun 20 10:57:44.661: INFO: (6) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 27.565842ms)
+Jun 20 10:57:44.661: INFO: (6) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 27.976137ms)
+Jun 20 10:57:44.661: INFO: (6) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 28.109347ms)
+Jun 20 10:57:44.661: INFO: (6) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test<... (200; 28.268315ms)
+Jun 20 10:57:44.661: INFO: (6) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 28.202864ms)
+Jun 20 10:57:44.661: INFO: (6) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 28.446176ms)
+Jun 20 10:57:44.661: INFO: (6) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 28.380012ms)
+Jun 20 10:57:44.663: INFO: (6) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 30.452476ms)
+Jun 20 10:57:44.663: INFO: (6) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 30.008348ms)
+Jun 20 10:57:44.664: INFO: (6) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 30.624703ms)
+Jun 20 10:57:44.664: INFO: (6) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 30.726454ms)
+Jun 20 10:57:44.673: INFO: (7) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 8.638625ms)
+Jun 20 10:57:44.680: INFO: (7) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 15.4328ms)
+Jun 20 10:57:44.680: INFO: (7) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 15.648783ms)
+Jun 20 10:57:44.681: INFO: (7) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: ... (200; 17.21772ms)
+Jun 20 10:57:44.682: INFO: (7) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 17.403983ms)
+Jun 20 10:57:44.682: INFO: (7) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 17.388538ms)
+Jun 20 10:57:44.684: INFO: (7) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 19.419057ms)
+Jun 20 10:57:44.684: INFO: (7) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 19.942096ms)
+Jun 20 10:57:44.684: INFO: (7) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 19.749832ms)
+Jun 20 10:57:44.684: INFO: (7) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 19.521687ms)
+Jun 20 10:57:44.702: INFO: (8) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 16.723547ms)
+Jun 20 10:57:44.702: INFO: (8) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 16.971541ms)
+Jun 20 10:57:44.702: INFO: (8) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 16.907142ms)
+Jun 20 10:57:44.702: INFO: (8) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test<... (200; 17.494256ms)
+Jun 20 10:57:44.703: INFO: (8) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 18.398056ms)
+Jun 20 10:57:44.703: INFO: (8) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 18.632584ms)
+Jun 20 10:57:44.704: INFO: (8) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 18.538782ms)
+Jun 20 10:57:44.704: INFO: (8) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 18.661124ms)
+Jun 20 10:57:44.704: INFO: (8) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 18.965552ms)
+Jun 20 10:57:44.704: INFO: (8) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 19.763595ms)
+Jun 20 10:57:44.704: INFO: (8) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 19.727579ms)
+Jun 20 10:57:44.726: INFO: (8) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 41.130918ms)
+Jun 20 10:57:44.726: INFO: (8) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 41.476974ms)
+Jun 20 10:57:44.726: INFO: (8) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 41.381971ms)
+Jun 20 10:57:44.727: INFO: (8) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 41.993783ms)
+Jun 20 10:57:44.743: INFO: (9) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 16.774403ms)
+Jun 20 10:57:44.744: INFO: (9) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 16.831117ms)
+Jun 20 10:57:44.744: INFO: (9) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test<... (200; 17.334164ms)
+Jun 20 10:57:44.745: INFO: (9) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 17.932566ms)
+Jun 20 10:57:44.745: INFO: (9) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 18.402358ms)
+Jun 20 10:57:44.746: INFO: (9) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 19.106117ms)
+Jun 20 10:57:44.746: INFO: (9) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 18.720861ms)
+Jun 20 10:57:44.746: INFO: (9) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 19.186804ms)
+Jun 20 10:57:44.746: INFO: (9) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 19.512323ms)
+Jun 20 10:57:44.746: INFO: (9) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 19.385904ms)
+Jun 20 10:57:44.760: INFO: (9) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 33.211938ms)
+Jun 20 10:57:44.760: INFO: (9) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 33.027694ms)
+Jun 20 10:57:44.760: INFO: (9) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 33.253967ms)
+Jun 20 10:57:44.761: INFO: (9) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 33.389348ms)
+Jun 20 10:57:44.769: INFO: (10) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 8.049575ms)
+Jun 20 10:57:44.770: INFO: (10) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 9.560966ms)
+Jun 20 10:57:44.772: INFO: (10) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 10.726046ms)
+Jun 20 10:57:44.790: INFO: (10) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 28.764544ms)
+Jun 20 10:57:44.790: INFO: (10) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 28.958118ms)
+Jun 20 10:57:44.790: INFO: (10) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 29.076931ms)
+Jun 20 10:57:44.791: INFO: (10) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 29.637321ms)
+Jun 20 10:57:44.791: INFO: (10) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 29.986745ms)
+Jun 20 10:57:44.791: INFO: (10) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: ... (200; 30.541474ms)
+Jun 20 10:57:44.791: INFO: (10) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 30.109996ms)
+Jun 20 10:57:44.791: INFO: (10) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 30.675307ms)
+Jun 20 10:57:44.791: INFO: (10) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 30.405181ms)
+Jun 20 10:57:44.793: INFO: (10) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 32.141684ms)
+Jun 20 10:57:44.793: INFO: (10) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 32.050088ms)
+Jun 20 10:57:44.793: INFO: (10) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 32.360475ms)
+Jun 20 10:57:44.807: INFO: (11) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 13.539468ms)
+Jun 20 10:57:44.808: INFO: (11) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 13.441914ms)
+Jun 20 10:57:44.808: INFO: (11) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 13.872152ms)
+Jun 20 10:57:44.809: INFO: (11) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 15.704751ms)
+Jun 20 10:57:44.810: INFO: (11) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 16.22163ms)
+Jun 20 10:57:44.810: INFO: (11) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 15.875526ms)
+Jun 20 10:57:44.810: INFO: (11) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 16.534349ms)
+Jun 20 10:57:44.810: INFO: (11) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test<... (200; 24.014491ms)
+Jun 20 10:57:44.849: INFO: (12) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 23.880556ms)
+Jun 20 10:57:44.854: INFO: (12) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 29.494768ms)
+Jun 20 10:57:44.855: INFO: (12) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 30.29145ms)
+Jun 20 10:57:44.856: INFO: (12) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 30.702472ms)
+Jun 20 10:57:44.856: INFO: (12) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 31.029719ms)
+Jun 20 10:57:44.856: INFO: (12) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 31.043441ms)
+Jun 20 10:57:44.856: INFO: (12) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 31.082327ms)
+Jun 20 10:57:44.856: INFO: (12) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test (200; 10.180438ms)
+Jun 20 10:57:44.887: INFO: (13) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 9.947196ms)
+Jun 20 10:57:44.887: INFO: (13) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 10.544036ms)
+Jun 20 10:57:44.888: INFO: (13) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 11.364422ms)
+Jun 20 10:57:44.889: INFO: (13) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 11.212122ms)
+Jun 20 10:57:44.889: INFO: (13) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test<... (200; 21.520884ms)
+Jun 20 10:57:44.925: INFO: (14) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 29.053032ms)
+Jun 20 10:57:44.926: INFO: (14) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 30.343551ms)
+Jun 20 10:57:44.927: INFO: (14) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 30.820951ms)
+Jun 20 10:57:44.927: INFO: (14) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 30.883278ms)
+Jun 20 10:57:44.927: INFO: (14) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 31.100469ms)
+Jun 20 10:57:44.927: INFO: (14) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 31.319862ms)
+Jun 20 10:57:44.927: INFO: (14) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: ... (200; 31.455189ms)
+Jun 20 10:57:44.939: INFO: (14) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 42.95742ms)
+Jun 20 10:57:44.939: INFO: (14) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 42.951625ms)
+Jun 20 10:57:44.940: INFO: (14) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 43.38569ms)
+Jun 20 10:57:44.940: INFO: (14) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 43.387115ms)
+Jun 20 10:57:44.940: INFO: (14) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 43.562478ms)
+Jun 20 10:57:44.951: INFO: (15) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 10.934346ms)
+Jun 20 10:57:44.955: INFO: (15) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 15.361342ms)
+Jun 20 10:57:44.956: INFO: (15) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 16.042898ms)
+Jun 20 10:57:44.957: INFO: (15) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 17.472033ms)
+Jun 20 10:57:44.957: INFO: (15) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 17.297316ms)
+Jun 20 10:57:44.957: INFO: (15) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 17.292553ms)
+Jun 20 10:57:44.958: INFO: (15) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test (200; 18.332ms)
+Jun 20 10:57:44.958: INFO: (15) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 18.294574ms)
+Jun 20 10:57:44.958: INFO: (15) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 18.014048ms)
+Jun 20 10:57:44.978: INFO: (15) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 37.891978ms)
+Jun 20 10:57:44.978: INFO: (15) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 37.92254ms)
+Jun 20 10:57:44.978: INFO: (15) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 37.697625ms)
+Jun 20 10:57:44.981: INFO: (15) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 40.65145ms)
+Jun 20 10:57:44.981: INFO: (15) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 40.759822ms)
+Jun 20 10:57:44.981: INFO: (15) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 40.919885ms)
+Jun 20 10:57:45.005: INFO: (16) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 23.213411ms)
+Jun 20 10:57:45.005: INFO: (16) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 23.557959ms)
+Jun 20 10:57:45.006: INFO: (16) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 24.216825ms)
+Jun 20 10:57:45.006: INFO: (16) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 24.16121ms)
+Jun 20 10:57:45.006: INFO: (16) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 24.829507ms)
+Jun 20 10:57:45.007: INFO: (16) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 25.367839ms)
+Jun 20 10:57:45.007: INFO: (16) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 25.623533ms)
+Jun 20 10:57:45.008: INFO: (16) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 26.340877ms)
+Jun 20 10:57:45.008: INFO: (16) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 26.226903ms)
+Jun 20 10:57:45.008: INFO: (16) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 26.510144ms)
+Jun 20 10:57:45.008: INFO: (16) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 26.399389ms)
+Jun 20 10:57:45.009: INFO: (16) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 26.854612ms)
+Jun 20 10:57:45.009: INFO: (16) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: ... (200; 27.292304ms)
+Jun 20 10:57:45.051: INFO: (17) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 28.293565ms)
+Jun 20 10:57:45.053: INFO: (17) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 29.754506ms)
+Jun 20 10:57:45.056: INFO: (17) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 32.924842ms)
+Jun 20 10:57:45.056: INFO: (17) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 33.326696ms)
+Jun 20 10:57:45.056: INFO: (17) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 33.015706ms)
+Jun 20 10:57:45.056: INFO: (17) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5/proxy/: test (200; 33.14146ms)
+Jun 20 10:57:45.057: INFO: (17) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 33.817845ms)
+Jun 20 10:57:45.057: INFO: (17) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: ... (200; 20.69624ms)
+Jun 20 10:57:45.083: INFO: (18) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test (200; 20.917863ms)
+Jun 20 10:57:45.084: INFO: (18) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 21.680469ms)
+Jun 20 10:57:45.084: INFO: (18) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 21.94956ms)
+Jun 20 10:57:45.084: INFO: (18) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 22.081669ms)
+Jun 20 10:57:45.086: INFO: (18) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 23.741753ms)
+Jun 20 10:57:45.086: INFO: (18) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 23.636136ms)
+Jun 20 10:57:45.086: INFO: (18) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 23.766407ms)
+Jun 20 10:57:45.086: INFO: (18) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 23.981631ms)
+Jun 20 10:57:45.086: INFO: (18) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 23.829783ms)
+Jun 20 10:57:45.086: INFO: (18) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 23.999688ms)
+Jun 20 10:57:45.086: INFO: (18) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 24.397134ms)
+Jun 20 10:57:45.086: INFO: (18) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 24.343151ms)
+Jun 20 10:57:45.086: INFO: (18) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 24.091512ms)
+Jun 20 10:57:45.087: INFO: (18) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 24.492807ms)
+Jun 20 10:57:45.098: INFO: (19) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/: foo (200; 10.375685ms)
+Jun 20 10:57:45.098: INFO: (19) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:1080/proxy/: test<... (200; 11.361011ms)
+Jun 20 10:57:45.114: INFO: (19) /api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:162/proxy/: bar (200; 26.905128ms)
+Jun 20 10:57:45.114: INFO: (19) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname1/proxy/: foo (200; 26.891912ms)
+Jun 20 10:57:45.115: INFO: (19) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:162/proxy/: bar (200; 27.804693ms)
+Jun 20 10:57:45.115: INFO: (19) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/: foo (200; 28.311619ms)
+Jun 20 10:57:45.115: INFO: (19) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname1/proxy/: tls baz (200; 27.952762ms)
+Jun 20 10:57:45.116: INFO: (19) /api/v1/namespaces/proxy-3680/services/https:proxy-service-np785:tlsportname2/proxy/: tls qux (200; 28.583177ms)
+Jun 20 10:57:45.116: INFO: (19) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:462/proxy/: tls qux (200; 28.336735ms)
+Jun 20 10:57:45.116: INFO: (19) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:460/proxy/: tls baz (200; 28.209249ms)
+Jun 20 10:57:45.116: INFO: (19) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:1080/proxy/: ... (200; 28.828953ms)
+Jun 20 10:57:45.116: INFO: (19) /api/v1/namespaces/proxy-3680/pods/https:proxy-service-np785-78lz5:443/proxy/: test (200; 28.843966ms)
+Jun 20 10:57:45.121: INFO: (19) /api/v1/namespaces/proxy-3680/services/http:proxy-service-np785:portname2/proxy/: bar (200; 34.421604ms)
+Jun 20 10:57:45.122: INFO: (19) /api/v1/namespaces/proxy-3680/pods/http:proxy-service-np785-78lz5:160/proxy/: foo (200; 34.188872ms)
+Jun 20 10:57:45.122: INFO: (19) /api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname2/proxy/: bar (200; 34.871759ms)
+STEP: deleting ReplicationController proxy-service-np785 in namespace proxy-3680, will wait for the garbage collector to delete the pods
+Jun 20 10:57:45.185: INFO: Deleting ReplicationController proxy-service-np785 took: 6.658151ms
+Jun 20 10:57:45.286: INFO: Terminating ReplicationController proxy-service-np785 pods took: 100.284795ms
+[AfterEach] version v1
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:57:47.486: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "proxy-3680" for this suite.
+Jun 20 10:57:53.500: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:57:53.563: INFO: namespace proxy-3680 deletion completed in 6.073565234s
+
+• [SLOW TEST:15.193 seconds]
+[sig-network] Proxy
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
+  version v1
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/proxy.go:58
+    should proxy through a service and a pod  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
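+Editor's note: the block above drives the API server's proxy subresource; each
+line records a proxied path, the first bytes of the (truncated) response body,
+the status code and the latency. A minimal sketch of issuing two such requests
+by hand, reusing names from this run (they exist only while the test namespace
+proxy-3680 is alive):
+
+    # Proxy through the service, addressing the named port "portname1".
+    kubectl get --raw \
+      "/api/v1/namespaces/proxy-3680/services/proxy-service-np785:portname1/proxy/"
+
+    # Proxy straight to container port 160 of one backing pod ("foo" body).
+    kubectl get --raw \
+      "/api/v1/namespaces/proxy-3680/pods/proxy-service-np785-78lz5:160/proxy/"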
+SSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Proxy server 
+  should support proxy with --port 0  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:57:53.564: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[It] should support proxy with --port 0  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: starting the proxy server
+Jun 20 10:57:53.594: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-878248618 proxy -p 0 --disable-filter'
+STEP: curling proxy /api/ output
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:57:53.649: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-1849" for this suite.
+Jun 20 10:57:59.665: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:57:59.739: INFO: namespace kubectl-1849 deletion completed in 6.083662129s
+
+• [SLOW TEST:6.175 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Proxy server
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should support proxy with --port 0  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
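+Editor's note: "proxy -p 0" asks kubectl to bind an ephemeral port and print
+it, and the test then curls /api/ through that port. A hand-run sketch; the
+log scraping below is illustrative, not the test's own code:
+
+    kubectl proxy -p 0 --disable-filter > /tmp/proxy.out 2>&1 &
+    sleep 1
+    # kubectl prints a line like "Starting to serve on 127.0.0.1:42751".
+    PORT=$(awk -F: '/Starting to serve/ {print $NF}' /tmp/proxy.out)
+    curl -s "http://127.0.0.1:${PORT}/api/"   # should return the API versions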
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] [sig-node] Events 
+  should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] [sig-node] Events
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:57:59.740: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename events
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: retrieving the pod
+Jun 20 10:58:01.807: INFO: &Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:send-events-6b1e247c-f1a9-4cd2-a736-e2bf41127860,GenerateName:,Namespace:events-4145,SelfLink:/api/v1/namespaces/events-4145/pods/send-events-6b1e247c-f1a9-4cd2-a736-e2bf41127860,UID:6d5566fe-0a09-4d1c-8ffa-98986ba0b325,ResourceVersion:20841,Generation:0,CreationTimestamp:2019-06-20 10:57:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: foo,time: 782141508,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-dbcv7 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-dbcv7,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{p gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1 [] []  [{ 0 80 TCP }] [] [] {map[] map[]} [{default-token-dbcv7 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002d79df0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002d79e10}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:57:59 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:58:01 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:58:01 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 10:57:59 +0000 UTC  }],Message:,Reason:,HostIP:10.100.10.111,PodIP:10.38.0.2,StartTime:2019-06-20 10:57:59 +0000 UTC,ContainerStatuses:[{p {nil ContainerStateRunning{StartedAt:2019-06-20 10:58:01 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1 docker-pullable://gcr.io/kubernetes-e2e-test-images/serve-hostname@sha256:bab70473a6d8ef65a22625dc9a1b0f0452e811530fdbe77e4408523460177ff1 docker://4d1075fb2411b8be222105c3e4f24176912fb97cc3e95145bcdbaf93a9f2679b}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+
+STEP: checking for scheduler event about the pod
+Jun 20 10:58:03.813: INFO: Saw scheduler event for our pod.
+STEP: checking for kubelet event about the pod
+Jun 20 10:58:05.821: INFO: Saw kubelet event for our pod.
+STEP: deleting the pod
+[AfterEach] [k8s.io] [sig-node] Events
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:58:05.828: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "events-4145" for this suite.
+Jun 20 10:58:49.864: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:58:50.010: INFO: namespace events-4145 deletion completed in 44.174712871s
+
+• [SLOW TEST:50.270 seconds]
+[k8s.io] [sig-node] Events
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
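+Editor's note: the two assertions above only need one scheduler event and one
+kubelet event for the pod. Roughly the same check by hand with field
+selectors, assuming "source" remains a server-side selectable field on events
+as it is in this release:
+
+    POD=send-events-6b1e247c-f1a9-4cd2-a736-e2bf41127860
+    # Scheduler events for the pod.
+    kubectl -n events-4145 get events \
+      --field-selector "involvedObject.name=${POD},source=default-scheduler"
+    # Kubelet events for the same pod.
+    kubectl -n events-4145 get events \
+      --field-selector "involvedObject.name=${POD},source=kubelet"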
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with downward pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:58:50.010: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename subpath
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:37
+STEP: Setting up data
+[It] should support subpaths with downward pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating pod pod-subpath-test-downwardapi-57cz
+STEP: Creating a pod to test atomic-volume-subpath
+Jun 20 10:58:50.101: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-57cz" in namespace "subpath-8416" to be "success or failure"
+Jun 20 10:58:50.116: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Pending", Reason="", readiness=false. Elapsed: 14.755797ms
+Jun 20 10:58:52.119: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Running", Reason="", readiness=true. Elapsed: 2.018001407s
+Jun 20 10:58:54.123: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Running", Reason="", readiness=true. Elapsed: 4.021926323s
+Jun 20 10:58:56.126: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Running", Reason="", readiness=true. Elapsed: 6.025126491s
+Jun 20 10:58:58.130: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Running", Reason="", readiness=true. Elapsed: 8.02944755s
+Jun 20 10:59:00.134: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Running", Reason="", readiness=true. Elapsed: 10.032605825s
+Jun 20 10:59:02.139: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Running", Reason="", readiness=true. Elapsed: 12.038180476s
+Jun 20 10:59:04.143: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Running", Reason="", readiness=true. Elapsed: 14.04161222s
+Jun 20 10:59:06.146: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Running", Reason="", readiness=true. Elapsed: 16.044952528s
+Jun 20 10:59:08.150: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Running", Reason="", readiness=true. Elapsed: 18.048961156s
+Jun 20 10:59:10.153: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Running", Reason="", readiness=true. Elapsed: 20.052440528s
+Jun 20 10:59:12.157: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Running", Reason="", readiness=true. Elapsed: 22.055757074s
+Jun 20 10:59:14.161: INFO: Pod "pod-subpath-test-downwardapi-57cz": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.059621363s
+STEP: Saw pod success
+Jun 20 10:59:14.161: INFO: Pod "pod-subpath-test-downwardapi-57cz" satisfied condition "success or failure"
+Jun 20 10:59:14.164: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-subpath-test-downwardapi-57cz container test-container-subpath-downwardapi-57cz: 
+STEP: delete the pod
+Jun 20 10:59:14.184: INFO: Waiting for pod pod-subpath-test-downwardapi-57cz to disappear
+Jun 20 10:59:14.187: INFO: Pod pod-subpath-test-downwardapi-57cz no longer exists
+STEP: Deleting pod pod-subpath-test-downwardapi-57cz
+Jun 20 10:59:14.187: INFO: Deleting pod "pod-subpath-test-downwardapi-57cz" in namespace "subpath-8416"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:59:14.189: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "subpath-8416" for this suite.
+Jun 20 10:59:20.202: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:59:20.283: INFO: namespace subpath-8416 deletion completed in 6.090623505s
+
+• [SLOW TEST:30.273 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:33
+    should support subpaths with downward pod [LinuxOnly] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
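+Editor's note: the pod above consumes a single file from a downward API volume
+through subPath. A stripped-down manifest of the same shape (names and image
+are placeholders, not the generated test spec):
+
+    kubectl apply -f - <<'EOF'
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: subpath-downward-demo
+    spec:
+      restartPolicy: Never
+      volumes:
+      - name: podinfo
+        downwardAPI:
+          items:
+          - path: podname
+            fieldRef:
+              fieldPath: metadata.name
+      containers:
+      - name: c
+        image: busybox
+        command: ["cat", "/etc/podname"]
+        volumeMounts:
+        - name: podinfo
+          mountPath: /etc/podname
+          subPath: podname        # mounts one file out of the volume
+    EOF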
+SSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:59:20.283: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating secret with name secret-test-f5e0197d-5636-40c7-b3a0-d24b57d6d8ab
+STEP: Creating a pod to test consume secrets
+Jun 20 10:59:20.366: INFO: Waiting up to 5m0s for pod "pod-secrets-fc98c8f9-967b-49f7-91e5-06a9477035a4" in namespace "secrets-6221" to be "success or failure"
+Jun 20 10:59:20.371: INFO: Pod "pod-secrets-fc98c8f9-967b-49f7-91e5-06a9477035a4": Phase="Pending", Reason="", readiness=false. Elapsed: 5.527965ms
+Jun 20 10:59:22.374: INFO: Pod "pod-secrets-fc98c8f9-967b-49f7-91e5-06a9477035a4": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008647456s
+STEP: Saw pod success
+Jun 20 10:59:22.374: INFO: Pod "pod-secrets-fc98c8f9-967b-49f7-91e5-06a9477035a4" satisfied condition "success or failure"
+Jun 20 10:59:22.380: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-secrets-fc98c8f9-967b-49f7-91e5-06a9477035a4 container secret-volume-test: 
+STEP: delete the pod
+Jun 20 10:59:22.404: INFO: Waiting for pod pod-secrets-fc98c8f9-967b-49f7-91e5-06a9477035a4 to disappear
+Jun 20 10:59:22.407: INFO: Pod pod-secrets-fc98c8f9-967b-49f7-91e5-06a9477035a4 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:59:22.407: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-6221" for this suite.
+Jun 20 10:59:28.420: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:59:28.492: INFO: namespace secrets-6221 deletion completed in 6.082183543s
+STEP: Destroying namespace "secret-namespace-1045" for this suite.
+Jun 20 10:59:34.501: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:59:34.569: INFO: namespace secret-namespace-1045 deletion completed in 6.076755655s
+
+• [SLOW TEST:14.286 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
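+Editor's note: the property being tested is that secret names are scoped per
+namespace, so two unrelated secrets may share a name. The same scenario by
+hand (all names invented):
+
+    kubectl create namespace ns-a
+    kubectl create namespace ns-b
+    # Same secret name, different payloads, in the two namespaces.
+    kubectl -n ns-a create secret generic shared-name --from-literal=data=value-a
+    kubectl -n ns-b create secret generic shared-name --from-literal=data=value-b
+    # A pod in ns-a that mounts "shared-name" must see value-a and never value-b.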
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:59:34.569: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 10:59:34.614: INFO: Waiting up to 5m0s for pod "downwardapi-volume-910a196c-be28-4811-be72-698a8b6eb59d" in namespace "downward-api-4901" to be "success or failure"
+Jun 20 10:59:34.617: INFO: Pod "downwardapi-volume-910a196c-be28-4811-be72-698a8b6eb59d": Phase="Pending", Reason="", readiness=false. Elapsed: 3.520422ms
+Jun 20 10:59:36.622: INFO: Pod "downwardapi-volume-910a196c-be28-4811-be72-698a8b6eb59d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008362718s
+STEP: Saw pod success
+Jun 20 10:59:36.622: INFO: Pod "downwardapi-volume-910a196c-be28-4811-be72-698a8b6eb59d" satisfied condition "success or failure"
+Jun 20 10:59:36.625: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-910a196c-be28-4811-be72-698a8b6eb59d container client-container: 
+STEP: delete the pod
+Jun 20 10:59:36.647: INFO: Waiting for pod downwardapi-volume-910a196c-be28-4811-be72-698a8b6eb59d to disappear
+Jun 20 10:59:36.651: INFO: Pod downwardapi-volume-910a196c-be28-4811-be72-698a8b6eb59d no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:59:36.651: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-4901" for this suite.
+Jun 20 10:59:42.667: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:59:42.733: INFO: namespace downward-api-4901 deletion completed in 6.079332468s
+
+• [SLOW TEST:8.164 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
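+Editor's note: when a container declares no CPU limit, a downward API
+resourceFieldRef for limits.cpu falls back to the node's allocatable CPU,
+which is what the test reads back. A minimal manifest of that shape (names
+and image are placeholders):
+
+    kubectl apply -f - <<'EOF'
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: downward-cpu-demo
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: c
+        image: busybox
+        command: ["cat", "/etc/podinfo/cpu_limit"]
+        # resources.limits.cpu deliberately not set
+        volumeMounts:
+        - name: podinfo
+          mountPath: /etc/podinfo
+      volumes:
+      - name: podinfo
+        downwardAPI:
+          items:
+          - path: cpu_limit
+            resourceFieldRef:
+              containerName: c
+              resource: limits.cpu
+    EOF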
+SS
+------------------------------
+[sig-api-machinery] Watchers 
+  should be able to start watching from a specific resource version [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:59:42.734: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename watch
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to start watching from a specific resource version [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating a new configmap
+STEP: modifying the configmap once
+STEP: modifying the configmap a second time
+STEP: deleting the configmap
+STEP: creating a watch on configmaps from the resource version returned by the first update
+STEP: Expecting to observe notifications for all changes to the configmap after the first update
+Jun 20 10:59:42.787: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:watch-2637,SelfLink:/api/v1/namespaces/watch-2637/configmaps/e2e-watch-test-resource-version,UID:60a24daa-1710-4702-b196-313638732528,ResourceVersion:21203,Generation:0,CreationTimestamp:2019-06-20 10:59:42 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun 20 10:59:42.788: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:watch-2637,SelfLink:/api/v1/namespaces/watch-2637/configmaps/e2e-watch-test-resource-version,UID:60a24daa-1710-4702-b196-313638732528,ResourceVersion:21204,Generation:0,CreationTimestamp:2019-06-20 10:59:42 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:59:42.788: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-2637" for this suite.
+Jun 20 10:59:48.800: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 10:59:48.865: INFO: namespace watch-2637 deletion completed in 6.074200965s
+
+• [SLOW TEST:6.131 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should be able to start watching from a specific resource version [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
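+Editor's note: the watch is opened at the resourceVersion returned by the
+first update, so only the later MODIFIED and the DELETED notifications arrive.
+The equivalent raw API call looks roughly like this, with 21202 standing in
+for whatever version the first update actually returned:
+
+    kubectl proxy --port=8001 &
+    curl -s "http://127.0.0.1:8001/api/v1/namespaces/watch-2637/configmaps?watch=1&fieldSelector=metadata.name%3De2e-watch-test-resource-version&resourceVersion=21202"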
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Services 
+  should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 10:59:48.865: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename services
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:88
+[It] should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating service multi-endpoint-test in namespace services-5133
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-5133 to expose endpoints map[]
+Jun 20 10:59:48.909: INFO: Get endpoints failed (2.546903ms elapsed, ignoring for 5s): endpoints "multi-endpoint-test" not found
+Jun 20 10:59:49.912: INFO: successfully validated that service multi-endpoint-test in namespace services-5133 exposes endpoints map[] (1.005723519s elapsed)
+STEP: Creating pod pod1 in namespace services-5133
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-5133 to expose endpoints map[pod1:[100]]
+Jun 20 10:59:51.934: INFO: successfully validated that service multi-endpoint-test in namespace services-5133 exposes endpoints map[pod1:[100]] (2.016328084s elapsed)
+STEP: Creating pod pod2 in namespace services-5133
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-5133 to expose endpoints map[pod1:[100] pod2:[101]]
+Jun 20 10:59:53.965: INFO: successfully validated that service multi-endpoint-test in namespace services-5133 exposes endpoints map[pod1:[100] pod2:[101]] (2.024404071s elapsed)
+STEP: Deleting pod pod1 in namespace services-5133
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-5133 to expose endpoints map[pod2:[101]]
+Jun 20 10:59:54.983: INFO: successfully validated that service multi-endpoint-test in namespace services-5133 exposes endpoints map[pod2:[101]] (1.012578935s elapsed)
+STEP: Deleting pod pod2 in namespace services-5133
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-5133 to expose endpoints map[]
+Jun 20 10:59:56.001: INFO: successfully validated that service multi-endpoint-test in namespace services-5133 exposes endpoints map[] (1.012379044s elapsed)
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 10:59:56.020: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "services-5133" for this suite.
+Jun 20 11:00:02.036: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:00:02.125: INFO: namespace services-5133 deletion completed in 6.101292671s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:92
+
+• [SLOW TEST:13.260 seconds]
+[sig-network] Services
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
+  should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
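+Editor's note: the service exposes two named ports and each backend pod serves
+only one of them; the test just compares the endpoints map after every pod
+create/delete. By hand, while the test namespace exists:
+
+    # The two named service ports.
+    kubectl -n services-5133 get service multi-endpoint-test \
+      -o jsonpath='{.spec.ports[*].name}'
+    # Watch the endpoints object converge as pod1/pod2 come and go.
+    kubectl -n services-5133 get endpoints multi-endpoint-test -o wide -w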
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute poststart http hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:00:02.126: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:63
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute poststart http hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the pod with lifecycle hook
+STEP: check poststart hook
+STEP: delete the pod with lifecycle hook
+Jun 20 11:00:06.219: INFO: Waiting for pod pod-with-poststart-http-hook to disappear
+Jun 20 11:00:06.223: INFO: Pod pod-with-poststart-http-hook still exists
+Jun 20 11:00:08.223: INFO: Waiting for pod pod-with-poststart-http-hook to disappear
+Jun 20 11:00:08.227: INFO: Pod pod-with-poststart-http-hook still exists
+Jun 20 11:00:10.223: INFO: Waiting for pod pod-with-poststart-http-hook to disappear
+Jun 20 11:00:10.227: INFO: Pod pod-with-poststart-http-hook still exists
+Jun 20 11:00:12.223: INFO: Waiting for pod pod-with-poststart-http-hook to disappear
+Jun 20 11:00:12.227: INFO: Pod pod-with-poststart-http-hook still exists
+Jun 20 11:00:14.223: INFO: Waiting for pod pod-with-poststart-http-hook to disappear
+Jun 20 11:00:14.227: INFO: Pod pod-with-poststart-http-hook still exists
+Jun 20 11:00:16.223: INFO: Waiting for pod pod-with-poststart-http-hook to disappear
+Jun 20 11:00:16.227: INFO: Pod pod-with-poststart-http-hook still exists
+Jun 20 11:00:18.223: INFO: Waiting for pod pod-with-poststart-http-hook to disappear
+Jun 20 11:00:18.227: INFO: Pod pod-with-poststart-http-hook no longer exists
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:00:18.227: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-lifecycle-hook-7753" for this suite.
+Jun 20 11:00:40.240: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:00:40.322: INFO: namespace container-lifecycle-hook-7753 deletion completed in 22.092363624s
+
+• [SLOW TEST:38.196 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:42
+    should execute poststart http hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
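+Editor's note: the pod under test declares a postStart httpGet hook pointed at
+the handler pod created in BeforeEach. The relevant fragment of such a spec,
+with placeholder target address and image:
+
+    kubectl apply -f - <<'EOF'
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: poststart-http-demo
+    spec:
+      containers:
+      - name: c
+        image: nginx
+        lifecycle:
+          postStart:
+            httpGet:
+              path: /echo?msg=poststart
+              port: 8080
+              host: 10.38.0.2   # placeholder: IP of the hook-handler pod
+    EOF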
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Namespaces [Serial] 
+  should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:00:40.323: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename namespaces
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a test namespace
+STEP: Waiting for a default service account to be provisioned in namespace
+STEP: Creating a service in the namespace
+STEP: Deleting the namespace
+STEP: Waiting for the namespace to be removed.
+STEP: Recreating the namespace
+STEP: Verifying there is no service in the namespace
+[AfterEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:00:46.476: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "namespaces-9126" for this suite.
+Jun 20 11:00:52.489: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:00:52.576: INFO: namespace namespaces-9126 deletion completed in 6.097337936s
+STEP: Destroying namespace "nsdeletetest-4507" for this suite.
+Jun 20 11:00:52.578: INFO: Namespace nsdeletetest-4507 was already deleted
+STEP: Destroying namespace "nsdeletetest-290" for this suite.
+Jun 20 11:00:58.588: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:00:58.737: INFO: namespace nsdeletetest-290 deletion completed in 6.158847139s
+
+• [SLOW TEST:18.414 seconds]
+[sig-api-machinery] Namespaces [Serial]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
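+Editor's note: the guarantee checked here is that deleting a namespace garbage
+collects the services inside it. The same flow by hand (names invented):
+
+    kubectl create namespace nsdelete-demo
+    kubectl -n nsdelete-demo create service clusterip test-svc --tcp=80:80
+    kubectl delete namespace nsdelete-demo --wait
+    kubectl -n nsdelete-demo get services   # fails: the namespace is gone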
+SSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:00:58.737: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating secret with name secret-test-bdd7719a-4db8-446c-bb08-e3211fcbdb43
+STEP: Creating a pod to test consume secrets
+Jun 20 11:00:58.790: INFO: Waiting up to 5m0s for pod "pod-secrets-dcff6c8f-917b-4b8f-9a37-b3386c4e245b" in namespace "secrets-3404" to be "success or failure"
+Jun 20 11:00:58.794: INFO: Pod "pod-secrets-dcff6c8f-917b-4b8f-9a37-b3386c4e245b": Phase="Pending", Reason="", readiness=false. Elapsed: 3.841933ms
+Jun 20 11:01:00.798: INFO: Pod "pod-secrets-dcff6c8f-917b-4b8f-9a37-b3386c4e245b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007345784s
+STEP: Saw pod success
+Jun 20 11:01:00.798: INFO: Pod "pod-secrets-dcff6c8f-917b-4b8f-9a37-b3386c4e245b" satisfied condition "success or failure"
+Jun 20 11:01:00.800: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-secrets-dcff6c8f-917b-4b8f-9a37-b3386c4e245b container secret-volume-test: 
+STEP: delete the pod
+Jun 20 11:01:00.817: INFO: Waiting for pod pod-secrets-dcff6c8f-917b-4b8f-9a37-b3386c4e245b to disappear
+Jun 20 11:01:00.822: INFO: Pod pod-secrets-dcff6c8f-917b-4b8f-9a37-b3386c4e245b no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:01:00.822: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-3404" for this suite.
+Jun 20 11:01:06.836: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:01:06.905: INFO: namespace secrets-3404 deletion completed in 6.079187303s
+
+• [SLOW TEST:8.168 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
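+Editor's note: defaultMode sets the file mode of every key projected from the
+secret into the volume. A minimal manifest of that shape (secret name and mode
+are placeholders; the secret must exist first):
+
+    kubectl apply -f - <<'EOF'
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: secret-defaultmode-demo
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: c
+        image: busybox
+        command: ["ls", "-l", "/etc/secret-volume"]
+        volumeMounts:
+        - name: secret-volume
+          mountPath: /etc/secret-volume
+      volumes:
+      - name: secret-volume
+        secret:
+          secretName: demo-secret
+          defaultMode: 0400   # every projected file becomes mode r--------
+    EOF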
+SSS
+------------------------------
+[k8s.io] Container Runtime blackbox test on terminated container 
+  should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:01:06.905: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-runtime
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the container
+STEP: wait for the container to reach Succeeded
+STEP: get the container status
+STEP: the container should be terminated
+STEP: the termination message should be set
+Jun 20 11:01:08.961: INFO: Expected: &{} to match Container's Termination Message:  --
+STEP: delete the container
+[AfterEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:01:08.975: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-runtime-9541" for this suite.
+Jun 20 11:01:14.988: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:01:15.068: INFO: namespace container-runtime-9541 deletion completed in 6.088968204s
+
+• [SLOW TEST:8.162 seconds]
+[k8s.io] Container Runtime
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  blackbox test
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:38
+    on terminated container
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:129
+      should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
+      /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
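+Editor's note: with terminationMessagePolicy FallbackToLogsOnError, container
+logs are copied into the termination message only on failure; on success, as
+here, the message stays empty. A minimal demonstration (image and command are
+placeholders):
+
+    kubectl apply -f - <<'EOF'
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: termination-message-demo
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: c
+        image: busybox
+        command: ["sh", "-c", "echo ran fine"]   # exits 0, so no fallback
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: FallbackToLogsOnError
+    EOF
+    # After the pod succeeds, the message field should print as empty:
+    kubectl get pod termination-message-demo \
+      -o jsonpath='{.status.containerStatuses[0].state.terminated.message}'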
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected combined 
+  should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected combined
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:01:15.068: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name configmap-projected-all-test-volume-f14e466d-3add-450d-a564-64a8d8aa5f22
+STEP: Creating secret with name secret-projected-all-test-volume-ca2b1796-8c42-4442-97a0-b5f25a5d2516
+STEP: Creating a pod to test Check all projections for projected volume plugin
+Jun 20 11:01:15.116: INFO: Waiting up to 5m0s for pod "projected-volume-08fe585f-2d20-4a22-9143-75c5e42f1396" in namespace "projected-7186" to be "success or failure"
+Jun 20 11:01:15.118: INFO: Pod "projected-volume-08fe585f-2d20-4a22-9143-75c5e42f1396": Phase="Pending", Reason="", readiness=false. Elapsed: 2.343571ms
+Jun 20 11:01:17.123: INFO: Pod "projected-volume-08fe585f-2d20-4a22-9143-75c5e42f1396": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007058419s
+STEP: Saw pod success
+Jun 20 11:01:17.123: INFO: Pod "projected-volume-08fe585f-2d20-4a22-9143-75c5e42f1396" satisfied condition "success or failure"
+Jun 20 11:01:17.126: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod projected-volume-08fe585f-2d20-4a22-9143-75c5e42f1396 container projected-all-volume-test: 
+STEP: delete the pod
+Jun 20 11:01:17.149: INFO: Waiting for pod projected-volume-08fe585f-2d20-4a22-9143-75c5e42f1396 to disappear
+Jun 20 11:01:17.151: INFO: Pod projected-volume-08fe585f-2d20-4a22-9143-75c5e42f1396 no longer exists
+[AfterEach] [sig-storage] Projected combined
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:01:17.151: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-7186" for this suite.
+Jun 20 11:01:23.167: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:01:23.238: INFO: namespace projected-7186 deletion completed in 6.083089418s
+
+• [SLOW TEST:8.170 seconds]
+[sig-storage] Projected combined
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_combined.go:31
+  should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
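+Editor's note: a projected volume merges configMap, secret and downwardAPI
+sources under one mount, which is exactly what this test exercises. A compact
+manifest of the same shape (the named configmap and secret are placeholders
+and must exist first):
+
+    kubectl apply -f - <<'EOF'
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: projected-all-demo
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: c
+        image: busybox
+        command: ["ls", "-R", "/all"]
+        volumeMounts:
+        - name: everything
+          mountPath: /all
+      volumes:
+      - name: everything
+        projected:
+          sources:
+          - configMap:
+              name: demo-configmap
+          - secret:
+              name: demo-secret
+          - downwardAPI:
+              items:
+              - path: podname
+                fieldRef:
+                  fieldPath: metadata.name
+    EOF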
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:01:23.238: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename containers
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test override command
+Jun 20 11:01:23.278: INFO: Waiting up to 5m0s for pod "client-containers-b2b94af0-6cdd-4bf2-a715-435458005855" in namespace "containers-4249" to be "success or failure"
+Jun 20 11:01:23.285: INFO: Pod "client-containers-b2b94af0-6cdd-4bf2-a715-435458005855": Phase="Pending", Reason="", readiness=false. Elapsed: 6.232941ms
+Jun 20 11:01:25.288: INFO: Pod "client-containers-b2b94af0-6cdd-4bf2-a715-435458005855": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009316413s
+STEP: Saw pod success
+Jun 20 11:01:25.288: INFO: Pod "client-containers-b2b94af0-6cdd-4bf2-a715-435458005855" satisfied condition "success or failure"
+Jun 20 11:01:25.290: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod client-containers-b2b94af0-6cdd-4bf2-a715-435458005855 container test-container: 
+STEP: delete the pod
+Jun 20 11:01:25.307: INFO: Waiting for pod client-containers-b2b94af0-6cdd-4bf2-a715-435458005855 to disappear
+Jun 20 11:01:25.310: INFO: Pod client-containers-b2b94af0-6cdd-4bf2-a715-435458005855 no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:01:25.310: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "containers-4249" for this suite.
+Jun 20 11:01:31.323: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:01:31.494: INFO: namespace containers-4249 deletion completed in 6.180672885s
+
+• [SLOW TEST:8.255 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
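+The override mechanism under test is the container-level command field, which replaces the image's ENTRYPOINT. A hand-rolled equivalent (names illustrative; busybox chosen only because the effect is easy to observe):
+
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: entrypoint-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: main
+    image: docker.io/library/busybox:1.29
+    command: ["echo", "entrypoint overridden"]   # replaces the image's default entrypoint
+EOF
+kubectl logs entrypoint-demo    # prints "entrypoint overridden" once the container has run
+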
+SSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl replace 
+  should update a single-container pod's image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:01:31.494: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[BeforeEach] [k8s.io] Kubectl replace
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1722
+[It] should update a single-container pod's image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun 20 11:01:31.537: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 run e2e-test-nginx-pod --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --labels=run=e2e-test-nginx-pod --namespace=kubectl-7938'
+Jun 20 11:01:31.615: INFO: stderr: ""
+Jun 20 11:01:31.615: INFO: stdout: "pod/e2e-test-nginx-pod created\n"
+STEP: verifying the pod e2e-test-nginx-pod is running
+STEP: verifying the pod e2e-test-nginx-pod was created
+Jun 20 11:01:36.665: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pod e2e-test-nginx-pod --namespace=kubectl-7938 -o json'
+Jun 20 11:01:36.729: INFO: stderr: ""
+Jun 20 11:01:36.729: INFO: stdout: "{\n    \"apiVersion\": \"v1\",\n    \"kind\": \"Pod\",\n    \"metadata\": {\n        \"creationTimestamp\": \"2019-06-20T11:01:31Z\",\n        \"labels\": {\n            \"run\": \"e2e-test-nginx-pod\"\n        },\n        \"name\": \"e2e-test-nginx-pod\",\n        \"namespace\": \"kubectl-7938\",\n        \"resourceVersion\": \"21663\",\n        \"selfLink\": \"/api/v1/namespaces/kubectl-7938/pods/e2e-test-nginx-pod\",\n        \"uid\": \"e7804188-a38f-4bb2-af9a-f25d71df68cc\"\n    },\n    \"spec\": {\n        \"containers\": [\n            {\n                \"image\": \"docker.io/library/nginx:1.14-alpine\",\n                \"imagePullPolicy\": \"IfNotPresent\",\n                \"name\": \"e2e-test-nginx-pod\",\n                \"resources\": {},\n                \"terminationMessagePath\": \"/dev/termination-log\",\n                \"terminationMessagePolicy\": \"File\",\n                \"volumeMounts\": [\n                    {\n                        \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n                        \"name\": \"default-token-l7g4c\",\n                        \"readOnly\": true\n                    }\n                ]\n            }\n        ],\n        \"dnsPolicy\": \"ClusterFirst\",\n        \"enableServiceLinks\": true,\n        \"nodeName\": \"ip-10-100-10-111.eu-west-1.compute.internal\",\n        \"priority\": 0,\n        \"restartPolicy\": \"Always\",\n        \"schedulerName\": \"default-scheduler\",\n        \"securityContext\": {},\n        \"serviceAccount\": \"default\",\n        \"serviceAccountName\": \"default\",\n        \"terminationGracePeriodSeconds\": 30,\n        \"tolerations\": [\n            {\n                \"effect\": \"NoExecute\",\n                \"key\": \"node.kubernetes.io/not-ready\",\n                \"operator\": \"Exists\",\n                \"tolerationSeconds\": 300\n            },\n            {\n                \"effect\": \"NoExecute\",\n                \"key\": \"node.kubernetes.io/unreachable\",\n                \"operator\": \"Exists\",\n                \"tolerationSeconds\": 300\n            }\n        ],\n        \"volumes\": [\n            {\n                \"name\": \"default-token-l7g4c\",\n                \"secret\": {\n                    \"defaultMode\": 420,\n                    \"secretName\": \"default-token-l7g4c\"\n                }\n            }\n        ]\n    },\n    \"status\": {\n        \"conditions\": [\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-20T11:01:31Z\",\n                \"status\": \"True\",\n                \"type\": \"Initialized\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-20T11:01:33Z\",\n                \"status\": \"True\",\n                \"type\": \"Ready\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-20T11:01:33Z\",\n                \"status\": \"True\",\n                \"type\": \"ContainersReady\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-20T11:01:31Z\",\n                \"status\": \"True\",\n                \"type\": \"PodScheduled\"\n            }\n        ],\n        \"containerStatuses\": [\n            {\n                \"containerID\": \"docker://cacce40c2064ae6ab108ba4d5543a4960cfe690e6905518411278614dd53c6a4\",\n                \"image\": \"nginx:1.14-alpine\",\n                \"imageID\": \"docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7\",\n                \"lastState\": {},\n                \"name\": \"e2e-test-nginx-pod\",\n                \"ready\": true,\n                \"restartCount\": 0,\n                \"state\": {\n                    \"running\": {\n                        \"startedAt\": \"2019-06-20T11:01:32Z\"\n                    }\n                }\n            }\n        ],\n        \"hostIP\": \"10.100.10.111\",\n        \"phase\": \"Running\",\n        \"podIP\": \"10.38.0.2\",\n        \"qosClass\": \"BestEffort\",\n        \"startTime\": \"2019-06-20T11:01:31Z\"\n    }\n}\n"
+STEP: replace the image in the pod
+Jun 20 11:01:36.729: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 replace -f - --namespace=kubectl-7938'
+Jun 20 11:01:36.936: INFO: stderr: ""
+Jun 20 11:01:36.936: INFO: stdout: "pod/e2e-test-nginx-pod replaced\n"
+STEP: verifying the pod e2e-test-nginx-pod has the right image docker.io/library/busybox:1.29
+[AfterEach] [k8s.io] Kubectl replace
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1727
+Jun 20 11:01:36.940: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete pods e2e-test-nginx-pod --namespace=kubectl-7938'
+Jun 20 11:01:47.148: INFO: stderr: ""
+Jun 20 11:01:47.148: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:01:47.148: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-7938" for this suite.
+Jun 20 11:01:53.161: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:01:53.229: INFO: namespace kubectl-7938 deletion completed in 6.077753302s
+
+• [SLOW TEST:21.735 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl replace
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should update a single-container pod's image  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
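+What the test automates is essentially a get/edit/replace round-trip on the live pod object. A manual equivalent, under the assumption that the old image tag occurs only in the image field (names illustrative; the --generator flag matches this kubectl version, as used in the run above):
+
+kubectl run replace-demo --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine
+kubectl get pod replace-demo -o yaml \
+  | sed 's|docker.io/library/nginx:1.14-alpine|docker.io/library/busybox:1.29|' \
+  | kubectl replace -f -
+kubectl get pod replace-demo -o jsonpath='{.spec.containers[0].image}'   # now busybox:1.29
+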
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:01:53.229: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name configmap-test-volume-e93d7f6e-5de1-47a5-8d72-a5b2d403fd41
+STEP: Creating a pod to test consume configMaps
+Jun 20 11:01:53.273: INFO: Waiting up to 5m0s for pod "pod-configmaps-b8bb1665-ed26-48fb-8c8c-1b926b184177" in namespace "configmap-3660" to be "success or failure"
+Jun 20 11:01:53.277: INFO: Pod "pod-configmaps-b8bb1665-ed26-48fb-8c8c-1b926b184177": Phase="Pending", Reason="", readiness=false. Elapsed: 3.837374ms
+Jun 20 11:01:55.280: INFO: Pod "pod-configmaps-b8bb1665-ed26-48fb-8c8c-1b926b184177": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007155916s
+STEP: Saw pod success
+Jun 20 11:01:55.280: INFO: Pod "pod-configmaps-b8bb1665-ed26-48fb-8c8c-1b926b184177" satisfied condition "success or failure"
+Jun 20 11:01:55.283: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-configmaps-b8bb1665-ed26-48fb-8c8c-1b926b184177 container configmap-volume-test: 
+STEP: delete the pod
+Jun 20 11:01:55.299: INFO: Waiting for pod pod-configmaps-b8bb1665-ed26-48fb-8c8c-1b926b184177 to disappear
+Jun 20 11:01:55.302: INFO: Pod pod-configmaps-b8bb1665-ed26-48fb-8c8c-1b926b184177 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:01:55.302: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-3660" for this suite.
+Jun 20 11:02:01.317: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:02:01.415: INFO: namespace configmap-3660 deletion completed in 6.1098466s
+
+• [SLOW TEST:8.185 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
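+The consumption pattern checked above is a plain configMap volume. A minimal hand-run sketch (names illustrative):
+
+kubectl create configmap volume-demo --from-literal=data-1=value-1
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: configmap-volume-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: main
+    image: busybox:1.29
+    command: ["cat", "/etc/configmap-volume/data-1"]   # each configMap key becomes a file
+    volumeMounts:
+    - name: cm
+      mountPath: /etc/configmap-volume
+  volumes:
+  - name: cm
+    configMap:
+      name: volume-demo
+EOF
+kubectl logs configmap-volume-demo    # value-1
+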
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Container Runtime blackbox test on terminated container 
+  should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:02:01.415: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-runtime
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the container
+STEP: wait for the container to reach Failed
+STEP: get the container status
+STEP: the container should be terminated
+STEP: the termination message should be set
+Jun 20 11:02:04.478: INFO: Expected: &{DONE} to match Container's Termination Message: DONE --
+STEP: delete the container
+[AfterEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:02:04.492: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-runtime-3300" for this suite.
+Jun 20 11:02:10.509: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:02:10.579: INFO: namespace container-runtime-3300 deletion completed in 6.083719726s
+
+• [SLOW TEST:9.164 seconds]
+[k8s.io] Container Runtime
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  blackbox test
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:38
+    on terminated container
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:129
+      should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
+      /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
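+The policy under test, FallbackToLogsOnError, makes the kubelet use the tail of the container log as the termination message when a failed container did not write /dev/termination-log. A minimal sketch reproducing the DONE expectation above (names illustrative):
+
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: termination-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: main
+    image: busybox:1.29
+    command: ["sh", "-c", "echo DONE; exit 1"]   # fails without writing the termination-log file
+    terminationMessagePolicy: FallbackToLogsOnError
+EOF
+kubectl get pod termination-demo \
+  -o jsonpath='{.status.containerStatuses[0].state.terminated.message}'   # DONE
+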
+SSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run deployment 
+  should create a deployment from an image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:02:10.579: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[BeforeEach] [k8s.io] Kubectl run deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1558
+[It] should create a deployment from an image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun 20 11:02:10.615: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --generator=deployment/v1beta1 --namespace=kubectl-7941'
+Jun 20 11:02:10.890: INFO: stderr: "kubectl run --generator=deployment/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+Jun 20 11:02:10.890: INFO: stdout: "deployment.extensions/e2e-test-nginx-deployment created\n"
+STEP: verifying the deployment e2e-test-nginx-deployment was created
+STEP: verifying the pod controlled by deployment e2e-test-nginx-deployment was created
+[AfterEach] [k8s.io] Kubectl run deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1563
+Jun 20 11:02:12.902: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete deployment e2e-test-nginx-deployment --namespace=kubectl-7941'
+Jun 20 11:02:12.973: INFO: stderr: ""
+Jun 20 11:02:12.973: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:02:12.973: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-7941" for this suite.
+Jun 20 11:02:18.987: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:02:19.061: INFO: namespace kubectl-7941 deletion completed in 6.084835783s
+
+• [SLOW TEST:8.482 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl run deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should create a deployment from an image  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
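+The stderr above already flags --generator=deployment/v1beta1 as deprecated. A non-deprecated way to get the same deployment-plus-pod shape on this kubectl (the app=<name> label is what create deployment sets by default):
+
+kubectl create deployment e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine
+kubectl get deployment,pods -l app=e2e-test-nginx-deployment
+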
+[sig-storage] ConfigMap 
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:02:19.061: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name configmap-test-volume-f017434e-4d44-4f98-99b8-04d4c3594e85
+STEP: Creating a pod to test consume configMaps
+Jun 20 11:02:19.104: INFO: Waiting up to 5m0s for pod "pod-configmaps-075e9b99-49da-423d-9056-6f833084ec51" in namespace "configmap-190" to be "success or failure"
+Jun 20 11:02:19.109: INFO: Pod "pod-configmaps-075e9b99-49da-423d-9056-6f833084ec51": Phase="Pending", Reason="", readiness=false. Elapsed: 4.871163ms
+Jun 20 11:02:21.113: INFO: Pod "pod-configmaps-075e9b99-49da-423d-9056-6f833084ec51": Phase="Pending", Reason="", readiness=false. Elapsed: 2.008299138s
+Jun 20 11:02:23.116: INFO: Pod "pod-configmaps-075e9b99-49da-423d-9056-6f833084ec51": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.011465675s
+STEP: Saw pod success
+Jun 20 11:02:23.116: INFO: Pod "pod-configmaps-075e9b99-49da-423d-9056-6f833084ec51" satisfied condition "success or failure"
+Jun 20 11:02:23.120: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-configmaps-075e9b99-49da-423d-9056-6f833084ec51 container configmap-volume-test: 
+STEP: delete the pod
+Jun 20 11:02:23.140: INFO: Waiting for pod pod-configmaps-075e9b99-49da-423d-9056-6f833084ec51 to disappear
+Jun 20 11:02:23.143: INFO: Pod pod-configmaps-075e9b99-49da-423d-9056-6f833084ec51 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:02:23.143: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-190" for this suite.
+Jun 20 11:02:29.156: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:02:29.230: INFO: namespace configmap-190 deletion completed in 6.085020163s
+
+• [SLOW TEST:10.169 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
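+Nothing new is needed for the multi-volume case beyond repeating the mount: the same configMap can back several volumes in one pod, e.g. (names illustrative):
+
+kubectl create configmap multi-demo --from-literal=data-1=value-1
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: configmap-multi-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: main
+    image: busybox:1.29
+    command: ["sh", "-c", "cat /etc/cm-a/data-1 /etc/cm-b/data-1"]
+    volumeMounts:
+    - name: cm-a
+      mountPath: /etc/cm-a
+    - name: cm-b
+      mountPath: /etc/cm-b
+  volumes:
+  - name: cm-a
+    configMap:
+      name: multi-demo
+  - name: cm-b
+    configMap:
+      name: multi-demo
+EOF
+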
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:02:29.231: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating secret with name s-test-opt-del-c82c66e0-3edb-4bd6-8f47-d48bb8ea6884
+STEP: Creating secret with name s-test-opt-upd-4e5c9e2a-3f95-4c05-8907-a72ba1f1b65f
+STEP: Creating the pod
+STEP: Deleting secret s-test-opt-del-c82c66e0-3edb-4bd6-8f47-d48bb8ea6884
+STEP: Updating secret s-test-opt-upd-4e5c9e2a-3f95-4c05-8907-a72ba1f1b65f
+STEP: Creating secret with name s-test-opt-create-9df019ad-1205-40a4-8a72-13a892e68c52
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:03:57.734: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-659" for this suite.
+Jun 20 11:04:19.748: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:04:19.834: INFO: namespace projected-659 deletion completed in 22.096887963s
+
+• [SLOW TEST:110.603 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
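+The "optional" behaviour exercised here comes from the volume source's optional field: the pod starts even while the referenced secret is missing, and the projected contents track later creates, updates, and deletes of the secret. A hand-run sketch (names illustrative; the delay before the file appears is the kubelet sync period, typically up to a minute):
+
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: optional-secret-demo
+spec:
+  containers:
+  - name: main
+    image: busybox:1.29
+    command: ["sleep", "3600"]
+    volumeMounts:
+    - name: maybe-secret
+      mountPath: /etc/secret-volume
+  volumes:
+  - name: maybe-secret
+    projected:
+      sources:
+      - secret:
+          name: not-created-yet
+          optional: true   # pod starts even though the secret does not exist yet
+EOF
+kubectl create secret generic not-created-yet --from-literal=data-1=value-1
+kubectl exec optional-secret-demo -- cat /etc/secret-volume/data-1   # appears after the next kubelet sync
+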
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:04:19.835: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward api env vars
+Jun 20 11:04:19.879: INFO: Waiting up to 5m0s for pod "downward-api-7575a8a0-a600-4db4-b3bd-985f83e158c1" in namespace "downward-api-9730" to be "success or failure"
+Jun 20 11:04:19.881: INFO: Pod "downward-api-7575a8a0-a600-4db4-b3bd-985f83e158c1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.516399ms
+Jun 20 11:04:21.885: INFO: Pod "downward-api-7575a8a0-a600-4db4-b3bd-985f83e158c1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006004711s
+Jun 20 11:04:23.889: INFO: Pod "downward-api-7575a8a0-a600-4db4-b3bd-985f83e158c1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009798995s
+STEP: Saw pod success
+Jun 20 11:04:23.889: INFO: Pod "downward-api-7575a8a0-a600-4db4-b3bd-985f83e158c1" satisfied condition "success or failure"
+Jun 20 11:04:23.891: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downward-api-7575a8a0-a600-4db4-b3bd-985f83e158c1 container dapi-container: 
+STEP: delete the pod
+Jun 20 11:04:23.910: INFO: Waiting for pod downward-api-7575a8a0-a600-4db4-b3bd-985f83e158c1 to disappear
+Jun 20 11:04:23.913: INFO: Pod downward-api-7575a8a0-a600-4db4-b3bd-985f83e158c1 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:04:23.913: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-9730" for this suite.
+Jun 20 11:04:29.925: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:04:29.989: INFO: namespace downward-api-9730 deletion completed in 6.073139986s
+
+• [SLOW TEST:10.154 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:32
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
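+The env var in question is populated through the downward API's status.hostIP field reference. A minimal sketch (names illustrative):
+
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downward-hostip-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: main
+    image: busybox:1.29
+    command: ["sh", "-c", "echo HOST_IP=\$HOST_IP"]
+    env:
+    - name: HOST_IP
+      valueFrom:
+        fieldRef:
+          fieldPath: status.hostIP
+EOF
+kubectl logs downward-hostip-demo    # e.g. HOST_IP=10.100.10.111 on the node used in this run
+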
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:04:29.989: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for the rc to be deleted
+STEP: Gathering metrics
+W0620 11:04:36.046635      15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun 20 11:04:36.046: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:04:36.046: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-1176" for this suite.
+Jun 20 11:04:42.058: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:04:42.139: INFO: namespace gc-1176 deletion completed in 6.090107916s
+
+• [SLOW TEST:12.150 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
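+The deleteOptions in question is propagationPolicy: Foreground, which parks a foregroundDeletion finalizer on the RC so it survives until all of its pods are gone. Outside the test framework the same delete can be issued through the API directly; rc name and namespace below are placeholders:
+
+kubectl proxy --port=8001 &
+curl -X DELETE http://localhost:8001/api/v1/namespaces/default/replicationcontrollers/my-rc \
+  -H 'Content-Type: application/json' \
+  -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Foreground"}'
+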
+S
+------------------------------
+[sig-storage] ConfigMap 
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:04:42.139: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name configmap-test-upd-6c37f659-386f-4e5d-ad2b-a9c43bd01b50
+STEP: Creating the pod
+STEP: Updating configmap configmap-test-upd-6c37f659-386f-4e5d-ad2b-a9c43bd01b50
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:05:50.508: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-5161" for this suite.
+Jun 20 11:06:12.521: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:06:12.595: INFO: namespace configmap-5161 deletion completed in 22.084276686s
+
+• [SLOW TEST:90.456 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
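+The propagation being timed above can be watched by hand: mount a configMap, patch it, and re-read the file after the kubelet's sync period (names illustrative):
+
+kubectl create configmap live-update-demo --from-literal=data-1=value-1
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: live-update-pod
+spec:
+  containers:
+  - name: main
+    image: busybox:1.29
+    command: ["sleep", "3600"]
+    volumeMounts:
+    - name: cm
+      mountPath: /etc/cm
+  volumes:
+  - name: cm
+    configMap:
+      name: live-update-demo
+EOF
+kubectl patch configmap live-update-demo -p '{"data":{"data-1":"value-2"}}'
+kubectl exec live-update-pod -- cat /etc/cm/data-1   # value-2, after up to ~a minute
+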
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl logs 
+  should be able to retrieve and filter logs  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:06:12.596: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[BeforeEach] [k8s.io] Kubectl logs
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1293
+STEP: creating an rc
+Jun 20 11:06:12.631: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-4911'
+Jun 20 11:06:12.837: INFO: stderr: ""
+Jun 20 11:06:12.837: INFO: stdout: "replicationcontroller/redis-master created\n"
+[It] should be able to retrieve and filter logs  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Waiting for Redis master to start.
+Jun 20 11:06:13.841: INFO: Selector matched 1 pods for map[app:redis]
+Jun 20 11:06:13.841: INFO: Found 0 / 1
+Jun 20 11:06:14.841: INFO: Selector matched 1 pods for map[app:redis]
+Jun 20 11:06:14.841: INFO: Found 0 / 1
+Jun 20 11:06:15.844: INFO: Selector matched 1 pods for map[app:redis]
+Jun 20 11:06:15.844: INFO: Found 1 / 1
+Jun 20 11:06:15.844: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+Jun 20 11:06:15.851: INFO: Selector matched 1 pods for map[app:redis]
+Jun 20 11:06:15.851: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+STEP: checking for a matching strings
+Jun 20 11:06:15.851: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 logs redis-master-8fhl6 redis-master --namespace=kubectl-4911'
+Jun 20 11:06:15.930: INFO: stderr: ""
+Jun 20 11:06:15.930: INFO: stdout: "                _._                                                  \n           _.-``__ ''-._                                             \n      _.-``    `.  `_.  ''-._           Redis 3.2.12 (35a5711f/0) 64 bit\n  .-`` .-```.  ```\\/    _.,_ ''-._                                   \n (    '      ,       .-`  | `,    )     Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'|     Port: 6379\n |    `-._   `._    /     _.-'    |     PID: 1\n  `-._    `-._  `-./  _.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |           http://redis.io        \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |                                  \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n      `-._    `-.__.-'    _.-'                                       \n          `-._        _.-'                                           \n              `-.__.-'                                               \n\n1:M 20 Jun 11:06:14.052 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 20 Jun 11:06:14.052 # Server started, Redis version 3.2.12\n1:M 20 Jun 11:06:14.052 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 20 Jun 11:06:14.052 * The server is now ready to accept connections on port 6379\n"
+STEP: limiting log lines
+Jun 20 11:06:15.930: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 log redis-master-8fhl6 redis-master --namespace=kubectl-4911 --tail=1'
+Jun 20 11:06:16.010: INFO: stderr: ""
+Jun 20 11:06:16.010: INFO: stdout: "1:M 20 Jun 11:06:14.052 * The server is now ready to accept connections on port 6379\n"
+STEP: limiting log bytes
+Jun 20 11:06:16.010: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 log redis-master-8fhl6 redis-master --namespace=kubectl-4911 --limit-bytes=1'
+Jun 20 11:06:16.091: INFO: stderr: ""
+Jun 20 11:06:16.091: INFO: stdout: " "
+STEP: exposing timestamps
+Jun 20 11:06:16.092: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 log redis-master-8fhl6 redis-master --namespace=kubectl-4911 --tail=1 --timestamps'
+Jun 20 11:06:16.209: INFO: stderr: ""
+Jun 20 11:06:16.209: INFO: stdout: "2019-06-20T11:06:14.052847669Z 1:M 20 Jun 11:06:14.052 * The server is now ready to accept connections on port 6379\n"
+STEP: restricting to a time range
+Jun 20 11:06:18.709: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 log redis-master-8fhl6 redis-master --namespace=kubectl-4911 --since=1s'
+Jun 20 11:06:18.786: INFO: stderr: ""
+Jun 20 11:06:18.787: INFO: stdout: ""
+Jun 20 11:06:18.787: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 log redis-master-8fhl6 redis-master --namespace=kubectl-4911 --since=24h'
+Jun 20 11:06:18.873: INFO: stderr: ""
+Jun 20 11:06:18.873: INFO: stdout: "                _._                                                  \n           _.-``__ ''-._                                             \n      _.-``    `.  `_.  ''-._           Redis 3.2.12 (35a5711f/0) 64 bit\n  .-`` .-```.  ```\\/    _.,_ ''-._                                   \n (    '      ,       .-`  | `,    )     Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'|     Port: 6379\n |    `-._   `._    /     _.-'    |     PID: 1\n  `-._    `-._  `-./  _.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |           http://redis.io        \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |                                  \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n      `-._    `-.__.-'    _.-'                                       \n          `-._        _.-'                                           \n              `-.__.-'                                               \n\n1:M 20 Jun 11:06:14.052 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 20 Jun 11:06:14.052 # Server started, Redis version 3.2.12\n1:M 20 Jun 11:06:14.052 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 20 Jun 11:06:14.052 * The server is now ready to accept connections on port 6379\n"
+[AfterEach] [k8s.io] Kubectl logs
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1299
+STEP: using delete to clean up resources
+Jun 20 11:06:18.873: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete --grace-period=0 --force -f - --namespace=kubectl-4911'
+Jun 20 11:06:18.947: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 20 11:06:18.947: INFO: stdout: "replicationcontroller \"redis-master\" force deleted\n"
+Jun 20 11:06:18.947: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get rc,svc -l name=nginx --no-headers --namespace=kubectl-4911'
+Jun 20 11:06:19.026: INFO: stderr: "No resources found.\n"
+Jun 20 11:06:19.026: INFO: stdout: ""
+Jun 20 11:06:19.026: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -l name=nginx --namespace=kubectl-4911 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun 20 11:06:19.090: INFO: stderr: ""
+Jun 20 11:06:19.090: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:06:19.090: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-4911" for this suite.
+Jun 20 11:06:41.107: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:06:41.222: INFO: namespace kubectl-4911 deletion completed in 22.128427036s
+
+• [SLOW TEST:28.626 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl logs
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should be able to retrieve and filter logs  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
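+All of the filtering shown above maps onto plain kubectl logs flags (the suite used the deprecated "kubectl log" alias; the pod and container names are the ones from this run and will differ elsewhere):
+
+kubectl logs redis-master-8fhl6 redis-master --tail=1           # last line only
+kubectl logs redis-master-8fhl6 redis-master --limit-bytes=1    # first byte only
+kubectl logs redis-master-8fhl6 redis-master --tail=1 --timestamps
+kubectl logs redis-master-8fhl6 redis-master --since=1s         # empty when nothing was logged in the last second
+kubectl logs redis-master-8fhl6 redis-master --since=24h
+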
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Deployment 
+  deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:06:41.223: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:68
+[It] deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 11:06:41.266: INFO: Pod name cleanup-pod: Found 0 pods out of 1
+Jun 20 11:06:46.269: INFO: Pod name cleanup-pod: Found 1 pods out of 1
+STEP: ensuring each pod is running
+Jun 20 11:06:46.269: INFO: Creating deployment test-cleanup-deployment
+STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:62
+Jun 20 11:06:48.304: INFO: Deployment "test-cleanup-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment,GenerateName:,Namespace:deployment-6234,SelfLink:/apis/apps/v1/namespaces/deployment-6234/deployments/test-cleanup-deployment,UID:96fcc799-012b-4357-bf94-067abb2b911f,ResourceVersion:22781,Generation:1,CreationTimestamp:2019-06-20 11:06:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 1,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-06-20 11:06:46 +0000 UTC 2019-06-20 11:06:46 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-20 11:06:48 +0000 UTC 2019-06-20 11:06:46 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-cleanup-deployment-55bbcbc84c" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},}
+
+Jun 20 11:06:48.307: INFO: New ReplicaSet "test-cleanup-deployment-55bbcbc84c" of Deployment "test-cleanup-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment-55bbcbc84c,GenerateName:,Namespace:deployment-6234,SelfLink:/apis/apps/v1/namespaces/deployment-6234/replicasets/test-cleanup-deployment-55bbcbc84c,UID:57b25db7-70a0-42d3-96a1-3415ae8f9d81,ResourceVersion:22770,Generation:1,CreationTimestamp:2019-06-20 11:06:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 55bbcbc84c,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-cleanup-deployment 96fcc799-012b-4357-bf94-067abb2b911f 0xc0025286e7 0xc0025286e8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod-template-hash: 55bbcbc84c,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 55bbcbc84c,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},}
+Jun 20 11:06:48.310: INFO: Pod "test-cleanup-deployment-55bbcbc84c-vzb6n" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment-55bbcbc84c-vzb6n,GenerateName:test-cleanup-deployment-55bbcbc84c-,Namespace:deployment-6234,SelfLink:/api/v1/namespaces/deployment-6234/pods/test-cleanup-deployment-55bbcbc84c-vzb6n,UID:df99f154-8dff-41ab-b59c-0432d86ff3c8,ResourceVersion:22769,Generation:0,CreationTimestamp:2019-06-20 11:06:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 55bbcbc84c,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-cleanup-deployment-55bbcbc84c 57b25db7-70a0-42d3-96a1-3415ae8f9d81 0xc002528d07 0xc002528d08}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-7p6s2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-7p6s2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [{default-token-7p6s2 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002528d70} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002528d90}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:06:46 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:06:48 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:06:48 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:06:46 +0000 UTC  }],Message:,Reason:,HostIP:10.100.10.111,PodIP:10.38.0.3,StartTime:2019-06-20 11:06:46 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-06-20 11:06:47 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://9a3519251183858ab3d50b21a8809113b1d722e26eea2b49d88150f22c970cb6}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:06:48.310: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-6234" for this suite.
+Jun 20 11:06:54.324: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:06:54.390: INFO: namespace deployment-6234 deletion completed in 6.076589799s
+
+• [SLOW TEST:13.167 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
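+
+The cleanup behaviour exercised above hinges on the Deployment's revisionHistoryLimit: with it at 0, each rollout lets the garbage collector delete the superseded ReplicaSet. A minimal hand-rolled reproduction, assuming a working kubeconfig (the cleanup-demo name and the patch annotation are illustrative, not taken from this run):
+
+    # cleanup-demo.yaml
+    apiVersion: apps/v1
+    kind: Deployment
+    metadata:
+      name: cleanup-demo
+    spec:
+      replicas: 1
+      revisionHistoryLimit: 0      # old ReplicaSets are removed after each rollout
+      selector:
+        matchLabels:
+          name: cleanup-pod
+      template:
+        metadata:
+          labels:
+            name: cleanup-pod
+        spec:
+          containers:
+          - name: redis
+            image: gcr.io/kubernetes-e2e-test-images/redis:1.0
+
+    kubectl apply -f cleanup-demo.yaml
+    # trigger a second rollout, then verify only the newest ReplicaSet survives
+    kubectl patch deployment cleanup-demo --type merge \
+      -p '{"spec":{"template":{"metadata":{"annotations":{"demo/revision":"2"}}}}}'
+    kubectl get rs -l name=cleanup-pod
+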
+SSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should run and stop complex daemon [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:06:54.390: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename daemonsets
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:103
+[It] should run and stop complex daemon [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 11:06:54.437: INFO: Creating daemon "daemon-set" with a node selector
+STEP: Initially, daemon pods should not be running on any nodes.
+Jun 20 11:06:54.445: INFO: Number of nodes with available pods: 0
+Jun 20 11:06:54.445: INFO: Number of running nodes: 0, number of available pods: 0
+STEP: Change node label to blue, check that daemon pod is launched.
+Jun 20 11:06:54.459: INFO: Number of nodes with available pods: 0
+Jun 20 11:06:54.459: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:06:55.463: INFO: Number of nodes with available pods: 0
+Jun 20 11:06:55.463: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:06:56.463: INFO: Number of nodes with available pods: 1
+Jun 20 11:06:56.463: INFO: Number of running nodes: 1, number of available pods: 1
+STEP: Update the node label to green, and wait for daemons to be unscheduled
+Jun 20 11:06:56.479: INFO: Number of nodes with available pods: 1
+Jun 20 11:06:56.479: INFO: Number of running nodes: 0, number of available pods: 1
+Jun 20 11:06:57.483: INFO: Number of nodes with available pods: 0
+Jun 20 11:06:57.483: INFO: Number of running nodes: 0, number of available pods: 0
+STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate
+Jun 20 11:06:57.490: INFO: Number of nodes with available pods: 0
+Jun 20 11:06:57.490: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:06:58.493: INFO: Number of nodes with available pods: 0
+Jun 20 11:06:58.493: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:06:59.493: INFO: Number of nodes with available pods: 0
+Jun 20 11:06:59.493: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:07:00.494: INFO: Number of nodes with available pods: 0
+Jun 20 11:07:00.494: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:07:01.494: INFO: Number of nodes with available pods: 0
+Jun 20 11:07:01.494: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:07:02.493: INFO: Number of nodes with available pods: 0
+Jun 20 11:07:02.493: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:07:03.493: INFO: Number of nodes with available pods: 0
+Jun 20 11:07:03.493: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:07:04.496: INFO: Number of nodes with available pods: 0
+Jun 20 11:07:04.496: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:07:05.497: INFO: Number of nodes with available pods: 0
+Jun 20 11:07:05.497: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:07:06.493: INFO: Number of nodes with available pods: 0
+Jun 20 11:07:06.493: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:07:07.493: INFO: Number of nodes with available pods: 0
+Jun 20 11:07:07.493: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:07:08.493: INFO: Number of nodes with available pods: 0
+Jun 20 11:07:08.493: INFO: Node ip-10-100-10-111.eu-west-1.compute.internal is running more than one daemon pod
+Jun 20 11:07:09.495: INFO: Number of nodes with available pods: 1
+Jun 20 11:07:09.495: INFO: Number of running nodes: 1, number of available pods: 1
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:69
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-9047, will wait for the garbage collector to delete the pods
+Jun 20 11:07:09.568: INFO: Deleting DaemonSet.extensions daemon-set took: 11.426209ms
+Jun 20 11:07:09.668: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.224626ms
+Jun 20 11:07:17.177: INFO: Number of nodes with available pods: 0
+Jun 20 11:07:17.177: INFO: Number of running nodes: 0, number of available pods: 0
+Jun 20 11:07:17.179: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-9047/daemonsets","resourceVersion":"22913"},"items":null}
+
+Jun 20 11:07:17.181: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-9047/pods","resourceVersion":"22913"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:07:17.196: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-9047" for this suite.
+Jun 20 11:07:23.209: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:07:23.284: INFO: namespace daemonsets-9047 deletion completed in 6.086206548s
+
+• [SLOW TEST:28.894 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should run and stop complex daemon [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
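+
+The whole sequence above is driven by node labels: the DaemonSet's nodeSelector decides where daemon pods may run, so relabelling a node schedules or unschedules them. A hand-rolled equivalent (daemon-set-demo, the color label and the image tag are illustrative; the node name is the one from this run):
+
+    # daemon-set-demo.yaml
+    apiVersion: apps/v1
+    kind: DaemonSet
+    metadata:
+      name: daemon-set-demo
+    spec:
+      selector:
+        matchLabels:
+          app: daemon-set-demo
+      updateStrategy:
+        type: RollingUpdate        # the strategy the test switches to mid-run
+      template:
+        metadata:
+          labels:
+            app: daemon-set-demo
+        spec:
+          nodeSelector:
+            color: blue            # pods land only on nodes labelled color=blue
+          containers:
+          - name: app
+            image: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1
+
+    kubectl apply -f daemon-set-demo.yaml
+    NODE=ip-10-100-10-111.eu-west-1.compute.internal
+    kubectl label node "$NODE" color=blue               # daemon pod is launched
+    kubectl label node "$NODE" color=green --overwrite  # daemon pod is unscheduled
+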
+SSSSS
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:07:23.285: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename init-container
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:44
+[It] should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating the pod
+Jun 20 11:07:23.328: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:07:26.763: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "init-container-158" for this suite.
+Jun 20 11:07:32.780: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:07:32.843: INFO: namespace init-container-158 deletion completed in 6.075571112s
+
+• [SLOW TEST:9.559 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
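+
+The scenario is a restartPolicy: Never pod whose init containers must each exit 0, in order, before the app container runs, which is why the test completes quickly above. A minimal sketch (names and the busybox image are illustrative):
+
+    # init-demo.yaml
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: init-demo
+    spec:
+      restartPolicy: Never
+      initContainers:              # run sequentially; each must exit 0
+      - name: init1
+        image: busybox
+        command: ['sh', '-c', 'true']
+      - name: init2
+        image: busybox
+        command: ['sh', '-c', 'true']
+      containers:
+      - name: run1
+        image: busybox
+        command: ['sh', '-c', 'true']
+
+    kubectl apply -f init-demo.yaml
+    kubectl get pod init-demo      # ends up Succeeded once all containers finish
+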
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:07:32.845: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 11:07:32.885: INFO: Waiting up to 5m0s for pod "downwardapi-volume-17c6430d-ca2b-46aa-a7d4-c9af380be0e8" in namespace "downward-api-6557" to be "success or failure"
+Jun 20 11:07:32.888: INFO: Pod "downwardapi-volume-17c6430d-ca2b-46aa-a7d4-c9af380be0e8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.429077ms
+Jun 20 11:07:34.891: INFO: Pod "downwardapi-volume-17c6430d-ca2b-46aa-a7d4-c9af380be0e8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.005668501s
+Jun 20 11:07:36.894: INFO: Pod "downwardapi-volume-17c6430d-ca2b-46aa-a7d4-c9af380be0e8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009115405s
+STEP: Saw pod success
+Jun 20 11:07:36.894: INFO: Pod "downwardapi-volume-17c6430d-ca2b-46aa-a7d4-c9af380be0e8" satisfied condition "success or failure"
+Jun 20 11:07:36.897: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-17c6430d-ca2b-46aa-a7d4-c9af380be0e8 container client-container: 
+STEP: delete the pod
+Jun 20 11:07:36.917: INFO: Waiting for pod downwardapi-volume-17c6430d-ca2b-46aa-a7d4-c9af380be0e8 to disappear
+Jun 20 11:07:36.920: INFO: Pod downwardapi-volume-17c6430d-ca2b-46aa-a7d4-c9af380be0e8 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:07:36.920: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-6557" for this suite.
+Jun 20 11:07:42.933: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:07:43.002: INFO: namespace downward-api-6557 deletion completed in 6.078231418s
+
+• [SLOW TEST:10.157 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
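+
+What is being verified is the downwardAPI volume's resourceFieldRef, which renders the container's own memory limit into a file that the container then prints. A self-contained sketch with illustrative names and sizes:
+
+    # downward-demo.yaml
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: downward-demo
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: client-container
+        image: busybox
+        command: ['sh', '-c', 'cat /etc/podinfo/memory_limit']
+        resources:
+          limits:
+            memory: 64Mi
+        volumeMounts:
+        - name: podinfo
+          mountPath: /etc/podinfo
+      volumes:
+      - name: podinfo
+        downwardAPI:
+          items:
+          - path: memory_limit
+            resourceFieldRef:
+              containerName: client-container
+              resource: limits.memory   # the value checked in the pod's log
+
+    kubectl apply -f downward-demo.yaml
+    kubectl logs downward-demo     # once completed: 67108864 (64Mi in bytes)
+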
+SSSSSSSSSSSSSS
+------------------------------
+[sig-node] ConfigMap 
+  should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-node] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:07:43.002: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap configmap-3917/configmap-test-540b77bb-d021-46e5-bc2a-81e4e9932d27
+STEP: Creating a pod to test consume configMaps
+Jun 20 11:07:43.046: INFO: Waiting up to 5m0s for pod "pod-configmaps-445b8da6-07aa-4d63-8da3-f710e42c216a" in namespace "configmap-3917" to be "success or failure"
+Jun 20 11:07:43.048: INFO: Pod "pod-configmaps-445b8da6-07aa-4d63-8da3-f710e42c216a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.406867ms
+Jun 20 11:07:45.052: INFO: Pod "pod-configmaps-445b8da6-07aa-4d63-8da3-f710e42c216a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.005695967s
+STEP: Saw pod success
+Jun 20 11:07:45.052: INFO: Pod "pod-configmaps-445b8da6-07aa-4d63-8da3-f710e42c216a" satisfied condition "success or failure"
+Jun 20 11:07:45.054: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-configmaps-445b8da6-07aa-4d63-8da3-f710e42c216a container env-test: 
+STEP: delete the pod
+Jun 20 11:07:45.071: INFO: Waiting for pod pod-configmaps-445b8da6-07aa-4d63-8da3-f710e42c216a to disappear
+Jun 20 11:07:45.073: INFO: Pod pod-configmaps-445b8da6-07aa-4d63-8da3-f710e42c216a no longer exists
+[AfterEach] [sig-node] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:07:45.073: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-3917" for this suite.
+Jun 20 11:07:51.088: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:07:51.209: INFO: namespace configmap-3917 deletion completed in 6.132328449s
+
+• [SLOW TEST:8.207 seconds]
+[sig-node] ConfigMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:31
+  should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
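+
+The consumption path here is a configMapKeyRef in the container's env, with the container simply echoing the variable. Reproduced by hand (the config-demo name and key are illustrative):
+
+    kubectl create configmap config-demo --from-literal=data-1=value-1
+
+    # env-test-demo.yaml
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: env-test-demo
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: env-test
+        image: busybox
+        command: ['sh', '-c', 'echo "CONFIG_DATA_1=$CONFIG_DATA_1"']
+        env:
+        - name: CONFIG_DATA_1
+          valueFrom:
+            configMapKeyRef:
+              name: config-demo
+              key: data-1
+
+    kubectl apply -f env-test-demo.yaml
+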
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:07:51.209: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 11:08:15.265: INFO: Container started at 2019-06-20 11:07:52 +0000 UTC, pod became ready at 2019-06-20 11:08:13 +0000 UTC
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:08:15.265: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-1392" for this suite.
+Jun 20 11:08:37.284: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:08:37.353: INFO: namespace container-probe-1392 deletion completed in 22.081087476s
+
+• [SLOW TEST:46.144 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
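+
+The 21-second gap logged between container start and readiness is exactly what the test wants: a readinessProbe with an initial delay keeps the pod unready for a while, and since readiness (unlike liveness) never kills the container, restartCount stays 0. A sketch with illustrative timings:
+
+    # readiness-demo.yaml
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: readiness-demo
+    spec:
+      containers:
+      - name: busybox
+        image: busybox
+        command: ['sh', '-c', 'touch /tmp/ready; sleep 600']
+        readinessProbe:
+          exec:
+            command: ['cat', '/tmp/ready']
+          initialDelaySeconds: 20    # pod cannot turn Ready before this elapses
+          periodSeconds: 5
+
+    kubectl apply -f readiness-demo.yaml
+    kubectl get pod readiness-demo -o jsonpath='{.status.containerStatuses[0].restartCount}'
+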
+SSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:08:37.354: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:164
+[It] should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 11:08:41.418: INFO: Waiting up to 5m0s for pod "client-envvars-093ece5c-e737-40d8-978f-2e5ff80a6382" in namespace "pods-6285" to be "success or failure"
+Jun 20 11:08:41.420: INFO: Pod "client-envvars-093ece5c-e737-40d8-978f-2e5ff80a6382": Phase="Pending", Reason="", readiness=false. Elapsed: 2.577229ms
+Jun 20 11:08:43.424: INFO: Pod "client-envvars-093ece5c-e737-40d8-978f-2e5ff80a6382": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006272592s
+Jun 20 11:08:45.427: INFO: Pod "client-envvars-093ece5c-e737-40d8-978f-2e5ff80a6382": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009637922s
+STEP: Saw pod success
+Jun 20 11:08:45.427: INFO: Pod "client-envvars-093ece5c-e737-40d8-978f-2e5ff80a6382" satisfied condition "success or failure"
+Jun 20 11:08:45.430: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod client-envvars-093ece5c-e737-40d8-978f-2e5ff80a6382 container env3cont: 
+STEP: delete the pod
+Jun 20 11:08:45.449: INFO: Waiting for pod client-envvars-093ece5c-e737-40d8-978f-2e5ff80a6382 to disappear
+Jun 20 11:08:45.451: INFO: Pod client-envvars-093ece5c-e737-40d8-978f-2e5ff80a6382 no longer exists
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:08:45.451: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-6285" for this suite.
+Jun 20 11:09:29.464: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:09:29.535: INFO: namespace pods-6285 deletion completed in 44.080487489s
+
+• [SLOW TEST:52.181 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
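+
+The mechanism under test is the kubelet injecting <SERVICE>_SERVICE_HOST and <SERVICE>_SERVICE_PORT variables into pods created after a service exists in the namespace. One way to observe it, with an illustrative service name and port:
+
+    kubectl create service clusterip fooservice --tcp=8765:8080
+    # pods created *after* the service see its coordinates in their environment
+    kubectl run env-demo --image=busybox --restart=Never --command -- \
+      sh -c 'env | grep FOOSERVICE'
+    kubectl logs env-demo   # e.g. FOOSERVICE_SERVICE_HOST=... and FOOSERVICE_SERVICE_PORT=8765
+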
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:09:29.535: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir 0644 on node default medium
+Jun 20 11:09:29.578: INFO: Waiting up to 5m0s for pod "pod-544f4653-3bb2-48a8-be83-38f5303a6a48" in namespace "emptydir-8965" to be "success or failure"
+Jun 20 11:09:29.584: INFO: Pod "pod-544f4653-3bb2-48a8-be83-38f5303a6a48": Phase="Pending", Reason="", readiness=false. Elapsed: 5.814218ms
+Jun 20 11:09:31.588: INFO: Pod "pod-544f4653-3bb2-48a8-be83-38f5303a6a48": Phase="Running", Reason="", readiness=true. Elapsed: 2.009692316s
+Jun 20 11:09:33.591: INFO: Pod "pod-544f4653-3bb2-48a8-be83-38f5303a6a48": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01334619s
+STEP: Saw pod success
+Jun 20 11:09:33.591: INFO: Pod "pod-544f4653-3bb2-48a8-be83-38f5303a6a48" satisfied condition "success or failure"
+Jun 20 11:09:33.594: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-544f4653-3bb2-48a8-be83-38f5303a6a48 container test-container: 
+STEP: delete the pod
+Jun 20 11:09:33.613: INFO: Waiting for pod pod-544f4653-3bb2-48a8-be83-38f5303a6a48 to disappear
+Jun 20 11:09:33.616: INFO: Pod pod-544f4653-3bb2-48a8-be83-38f5303a6a48 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:09:33.616: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-8965" for this suite.
+Jun 20 11:09:39.632: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:09:39.701: INFO: namespace emptydir-8965 deletion completed in 6.081796646s
+
+• [SLOW TEST:10.166 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
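+
+The (root,0644,default) triple in the test name decodes as: write as root, expect file mode 0644, on the default (node-disk) emptyDir medium. A hand-rolled equivalent using plain busybox in place of the framework's mounttest image:
+
+    # emptydir-demo.yaml
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: emptydir-demo
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: test-container
+        image: busybox
+        command: ['sh', '-c', 'echo hi > /test-volume/f && chmod 0644 /test-volume/f && ls -l /test-volume/f']
+        volumeMounts:
+        - name: test-volume
+          mountPath: /test-volume
+      volumes:
+      - name: test-volume
+        emptyDir: {}               # default medium; medium: Memory would use tmpfs
+
+    kubectl apply -f emptydir-demo.yaml
+    kubectl logs emptydir-demo     # expect -rw-r--r-- (0644)
+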
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:09:39.702: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name projected-configmap-test-volume-6baaec73-db80-4eb6-9f58-75c5051db4af
+STEP: Creating a pod to test consume configMaps
+Jun 20 11:09:39.744: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-058692b5-478b-4b68-a0ce-797f6ff2760d" in namespace "projected-9105" to be "success or failure"
+Jun 20 11:09:39.749: INFO: Pod "pod-projected-configmaps-058692b5-478b-4b68-a0ce-797f6ff2760d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.018221ms
+Jun 20 11:09:41.753: INFO: Pod "pod-projected-configmaps-058692b5-478b-4b68-a0ce-797f6ff2760d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008588352s
+STEP: Saw pod success
+Jun 20 11:09:41.753: INFO: Pod "pod-projected-configmaps-058692b5-478b-4b68-a0ce-797f6ff2760d" satisfied condition "success or failure"
+Jun 20 11:09:41.755: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-configmaps-058692b5-478b-4b68-a0ce-797f6ff2760d container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 20 11:09:41.773: INFO: Waiting for pod pod-projected-configmaps-058692b5-478b-4b68-a0ce-797f6ff2760d to disappear
+Jun 20 11:09:41.777: INFO: Pod pod-projected-configmaps-058692b5-478b-4b68-a0ce-797f6ff2760d no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:09:41.777: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-9105" for this suite.
+Jun 20 11:09:47.790: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:09:47.855: INFO: namespace projected-9105 deletion completed in 6.074485971s
+
+• [SLOW TEST:8.153 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
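+
+Here one ConfigMap is projected into two separate volumes of the same pod, and both mounts must expose identical content. Sketched with illustrative names:
+
+    kubectl create configmap shared-config --from-literal=data-1=value-1
+
+    # projected-demo.yaml
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: projected-demo
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: projected-configmap-volume-test
+        image: busybox
+        command: ['sh', '-c', 'cat /etc/projected-1/data-1 /etc/projected-2/data-1']
+        volumeMounts:
+        - name: vol-1
+          mountPath: /etc/projected-1
+        - name: vol-2
+          mountPath: /etc/projected-2
+      volumes:
+      - name: vol-1
+        projected:
+          sources:
+          - configMap:
+              name: shared-config
+      - name: vol-2
+        projected:
+          sources:
+          - configMap:
+              name: shared-config
+
+    kubectl apply -f projected-demo.yaml
+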
+SSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:09:47.855: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the deployment
+STEP: Wait for the Deployment to create new ReplicaSet
+STEP: delete the deployment
+STEP: wait for all rs to be garbage collected
+STEP: expected 0 rs, got 1 rs
+STEP: expected 0 pods, got 2 pods
+STEP: Gathering metrics
+Jun 20 11:09:48.921: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:09:48.921: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+W0620 11:09:48.921692      15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+STEP: Destroying namespace "gc-2239" for this suite.
+Jun 20 11:09:54.933: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:09:54.998: INFO: namespace gc-2239 deletion completed in 6.074011584s
+
+• [SLOW TEST:7.143 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
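+
+"Not orphaning" means the deployment is deleted with a cascading propagation policy (Background or Foreground), so the garbage collector walks the ownerReferences and removes the dependent ReplicaSet and pods. With this release's kubectl, assuming a deployment named nginx-deployment in the default namespace:
+
+    # cascading delete (kubectl's default behaviour in this release)
+    kubectl delete deployment nginx-deployment --cascade=true
+
+    # the same request against the API, choosing the policy explicitly
+    kubectl proxy &
+    curl -X DELETE -H 'Content-Type: application/json' \
+      -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Foreground"}' \
+      http://localhost:8001/apis/apps/v1/namespaces/default/deployments/nginx-deployment
+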
+SSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:09:54.998: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 11:09:55.038: INFO: Waiting up to 5m0s for pod "downwardapi-volume-a838ec97-ba02-4826-9bc5-6c75c8ea202d" in namespace "projected-9616" to be "success or failure"
+Jun 20 11:09:55.044: INFO: Pod "downwardapi-volume-a838ec97-ba02-4826-9bc5-6c75c8ea202d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.010048ms
+Jun 20 11:09:57.047: INFO: Pod "downwardapi-volume-a838ec97-ba02-4826-9bc5-6c75c8ea202d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009242402s
+STEP: Saw pod success
+Jun 20 11:09:57.047: INFO: Pod "downwardapi-volume-a838ec97-ba02-4826-9bc5-6c75c8ea202d" satisfied condition "success or failure"
+Jun 20 11:09:57.049: INFO: Trying to get logs from node ip-10-100-12-226.eu-west-1.compute.internal pod downwardapi-volume-a838ec97-ba02-4826-9bc5-6c75c8ea202d container client-container: 
+STEP: delete the pod
+Jun 20 11:09:57.069: INFO: Waiting for pod downwardapi-volume-a838ec97-ba02-4826-9bc5-6c75c8ea202d to disappear
+Jun 20 11:09:57.072: INFO: Pod downwardapi-volume-a838ec97-ba02-4826-9bc5-6c75c8ea202d no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:09:57.072: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-9616" for this suite.
+Jun 20 11:10:03.085: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:10:03.165: INFO: namespace projected-9616 deletion completed in 6.090684939s
+
+• [SLOW TEST:8.167 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
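+
+This is the projected-volume variant of the earlier downward API check, reading requests.memory rather than limits.memory. A self-contained sketch with illustrative names and sizes:
+
+    # projected-downward-demo.yaml
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: projected-downward-demo
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: client-container
+        image: busybox
+        command: ['sh', '-c', 'cat /etc/podinfo/memory_request']
+        resources:
+          requests:
+            memory: 32Mi
+        volumeMounts:
+        - name: podinfo
+          mountPath: /etc/podinfo
+      volumes:
+      - name: podinfo
+        projected:
+          sources:
+          - downwardAPI:
+              items:
+              - path: memory_request
+                resourceFieldRef:
+                  containerName: client-container
+                  resource: requests.memory
+
+    kubectl apply -f projected-downward-demo.yaml
+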
+SSS
+------------------------------
+[k8s.io] Probing container 
+  should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:10:03.166: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating pod busybox-6d089581-3d4d-4304-8674-40145a084a8f in namespace container-probe-2646
+Jun 20 11:10:07.230: INFO: Started pod busybox-6d089581-3d4d-4304-8674-40145a084a8f in namespace container-probe-2646
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun 20 11:10:07.233: INFO: Initial restart count of pod busybox-6d089581-3d4d-4304-8674-40145a084a8f is 0
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:14:07.700: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-2646" for this suite.
+Jun 20 11:14:13.715: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:14:13.784: INFO: namespace container-probe-2646 deletion completed in 6.079456423s
+
+• [SLOW TEST:250.618 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
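+
+The pod above keeps /tmp/health in place for the whole four-minute observation window, so the exec liveness probe keeps succeeding and restartCount never moves off 0. A minimal reproduction (timings illustrative):
+
+    # liveness-demo.yaml
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: liveness-demo
+    spec:
+      containers:
+      - name: busybox
+        image: busybox
+        command: ['sh', '-c', 'touch /tmp/health; sleep 600']
+        livenessProbe:
+          exec:
+            command: ['cat', '/tmp/health']   # succeeds as long as the file exists
+          initialDelaySeconds: 5
+          periodSeconds: 5
+
+    kubectl apply -f liveness-demo.yaml
+    # after a few minutes this should still print 0
+    kubectl get pod liveness-demo -o jsonpath='{.status.containerStatuses[0].restartCount}'
+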
+SSSSSSSSSSSS
+------------------------------
+[sig-auth] ServiceAccounts 
+  should allow opting out of API token automount  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:14:13.784: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename svcaccounts
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow opting out of API token automount  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: getting the auto-created API token
+Jun 20 11:14:14.339: INFO: created pod pod-service-account-defaultsa
+Jun 20 11:14:14.339: INFO: pod pod-service-account-defaultsa service account token volume mount: true
+Jun 20 11:14:14.343: INFO: created pod pod-service-account-mountsa
+Jun 20 11:14:14.343: INFO: pod pod-service-account-mountsa service account token volume mount: true
+Jun 20 11:14:14.349: INFO: created pod pod-service-account-nomountsa
+Jun 20 11:14:14.349: INFO: pod pod-service-account-nomountsa service account token volume mount: false
+Jun 20 11:14:14.355: INFO: created pod pod-service-account-defaultsa-mountspec
+Jun 20 11:14:14.355: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true
+Jun 20 11:14:14.362: INFO: created pod pod-service-account-mountsa-mountspec
+Jun 20 11:14:14.362: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true
+Jun 20 11:14:14.369: INFO: created pod pod-service-account-nomountsa-mountspec
+Jun 20 11:14:14.369: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true
+Jun 20 11:14:14.377: INFO: created pod pod-service-account-defaultsa-nomountspec
+Jun 20 11:14:14.377: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false
+Jun 20 11:14:14.385: INFO: created pod pod-service-account-mountsa-nomountspec
+Jun 20 11:14:14.385: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false
+Jun 20 11:14:14.395: INFO: created pod pod-service-account-nomountsa-nomountspec
+Jun 20 11:14:14.395: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false
+[AfterEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:14:14.395: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "svcaccounts-15" for this suite.
+Jun 20 11:14:20.411: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:14:20.476: INFO: namespace svcaccounts-15 deletion completed in 6.077054062s
+
+• [SLOW TEST:6.692 seconds]
+[sig-auth] ServiceAccounts
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:23
+  should allow opting out of API token automount  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
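+
+The nine pods above enumerate the automount matrix: automountServiceAccountToken can be set on the ServiceAccount, on the pod spec, or both, and the pod-level field wins whenever both are set (hence nomountsa-mountspec mounting the token). The two knobs, sketched with illustrative names:
+
+    # sa-demo.yaml
+    apiVersion: v1
+    kind: ServiceAccount
+    metadata:
+      name: nomount-sa
+    automountServiceAccountToken: false   # SA-level default: no token volume
+    ---
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: sa-demo
+    spec:
+      serviceAccountName: nomount-sa
+      automountServiceAccountToken: true  # pod-level setting overrides the SA
+      restartPolicy: Never
+      containers:
+      - name: main
+        image: busybox
+        command: ['sh', '-c', 'ls /var/run/secrets/kubernetes.io/serviceaccount']
+
+    kubectl apply -f sa-demo.yaml
+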
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] DNS 
+  should provide DNS for the cluster  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] DNS
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:14:20.477: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename dns
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide DNS for the cluster  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-8683.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-8683.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done
+
+STEP: creating a pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Jun 20 11:14:22.562: INFO: DNS probes using dns-8683/dns-test-5bb66a24-416d-45a9-8db2-5d87a0606967 succeeded
+
+STEP: deleting the pod
+[AfterEach] [sig-network] DNS
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:14:22.578: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "dns-8683" for this suite.
+Jun 20 11:14:28.591: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:14:28.663: INFO: namespace dns-8683 deletion completed in 6.081483054s
+
+• [SLOW TEST:8.187 seconds]
+[sig-network] DNS
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
+  should provide DNS for the cluster  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
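+
+The dig loops above reduce to one requirement: cluster DNS must resolve kubernetes.default.svc.cluster.local over both UDP and TCP, plus the pod's own A record. A quicker manual spot check (busybox:1.28 chosen because its nslookup is known to behave; the pod name is illustrative):
+
+    kubectl run dns-check --image=busybox:1.28 --restart=Never -- \
+      nslookup kubernetes.default.svc.cluster.local
+    kubectl logs dns-check
+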
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run default 
+  should create an rc or deployment from an image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:14:28.664: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[BeforeEach] [k8s.io] Kubectl run default
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1421
+[It] should create an rc or deployment from an image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun 20 11:14:28.695: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --namespace=kubectl-9036'
+Jun 20 11:14:28.947: INFO: stderr: "kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+Jun 20 11:14:28.947: INFO: stdout: "deployment.apps/e2e-test-nginx-deployment created\n"
+STEP: verifying the pod controlled by e2e-test-nginx-deployment gets created
+[AfterEach] [k8s.io] Kubectl run default
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1427
+Jun 20 11:14:30.954: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete deployment e2e-test-nginx-deployment --namespace=kubectl-9036'
+Jun 20 11:14:31.065: INFO: stderr: ""
+Jun 20 11:14:31.065: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:14:31.065: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-9036" for this suite.
+Jun 20 11:14:53.078: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:14:53.157: INFO: namespace kubectl-9036 deletion completed in 22.088211043s
+
+• [SLOW TEST:24.492 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl run default
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should create an rc or deployment from an image  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
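+
+As the deprecation warning in the output notes, kubectl run's default generator still produced a Deployment in this release. The objects it creates can be inspected through the run=<name> label kubectl attaches (same image as the test; the surrounding commands are illustrative):
+
+    kubectl run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine
+    kubectl get deployment,pods -l run=e2e-test-nginx-deployment
+    kubectl delete deployment e2e-test-nginx-deployment
+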
+SSSSSSSS
+------------------------------
+[sig-apps] Deployment 
+  deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:14:53.157: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:68
+[It] deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 11:14:53.187: INFO: Creating deployment "nginx-deployment"
+Jun 20 11:14:53.192: INFO: Waiting for observed generation 1
+Jun 20 11:14:55.198: INFO: Waiting for all required pods to come up
+Jun 20 11:14:55.202: INFO: Pod name nginx: Found 10 pods out of 10
+STEP: ensuring each pod is running
+Jun 20 11:14:57.214: INFO: Waiting for deployment "nginx-deployment" to complete
+Jun 20 11:14:57.218: INFO: Updating deployment "nginx-deployment" with a non-existent image
+Jun 20 11:14:57.225: INFO: Updating deployment nginx-deployment
+Jun 20 11:14:57.225: INFO: Waiting for observed generation 2
+Jun 20 11:14:59.231: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8
+Jun 20 11:14:59.233: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8
+Jun 20 11:14:59.235: INFO: Waiting for the first rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas
+Jun 20 11:14:59.241: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0
+Jun 20 11:14:59.241: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5
+Jun 20 11:14:59.243: INFO: Waiting for the second rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas
+Jun 20 11:14:59.247: INFO: Verifying that deployment "nginx-deployment" has minimum required number of available replicas
+Jun 20 11:14:59.247: INFO: Scaling up the deployment "nginx-deployment" from 10 to 30
+Jun 20 11:14:59.252: INFO: Updating deployment nginx-deployment
+Jun 20 11:14:59.252: INFO: Waiting for the replicasets of deployment "nginx-deployment" to have desired number of replicas
+Jun 20 11:14:59.258: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20
+Jun 20 11:14:59.261: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:62
+Jun 20 11:14:59.268: INFO: Deployment "nginx-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment,GenerateName:,Namespace:deployment-1133,SelfLink:/apis/apps/v1/namespaces/deployment-1133/deployments/nginx-deployment,UID:c5d0cf9d-b3b1-4e2a-b321-6148c9266bd6,ResourceVersion:24443,Generation:3,CreationTimestamp:2019-06-20 11:14:53 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*30,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:13,UpdatedReplicas:5,AvailableReplicas:8,UnavailableReplicas:5,Conditions:[{Available True 2019-06-20 11:14:56 +0000 UTC 2019-06-20 11:14:56 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-20 11:14:57 +0000 UTC 2019-06-20 11:14:53 +0000 UTC ReplicaSetUpdated ReplicaSet "nginx-deployment-55fb7cb77f" is progressing.}],ReadyReplicas:8,CollisionCount:nil,},}
+
+Jun 20 11:14:59.273: INFO: New ReplicaSet "nginx-deployment-55fb7cb77f" of Deployment "nginx-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-55fb7cb77f,GenerateName:,Namespace:deployment-1133,SelfLink:/apis/apps/v1/namespaces/deployment-1133/replicasets/nginx-deployment-55fb7cb77f,UID:22859f57-b131-4454-880a-f49cd168c407,ResourceVersion:24445,Generation:3,CreationTimestamp:2019-06-20 11:14:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 55fb7cb77f,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment nginx-deployment c5d0cf9d-b3b1-4e2a-b321-6148c9266bd6 0xc0014ffba7 0xc0014ffba8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*13,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 55fb7cb77f,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 55fb7cb77f,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:5,FullyLabeledReplicas:5,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun 20 11:14:59.273: INFO: All old ReplicaSets of Deployment "nginx-deployment":
+Jun 20 11:14:59.273: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498,GenerateName:,Namespace:deployment-1133,SelfLink:/apis/apps/v1/namespaces/deployment-1133/replicasets/nginx-deployment-7b8c6f4498,UID:029355d1-b157-4ecd-98b8-494baa43964a,ResourceVersion:24444,Generation:3,CreationTimestamp:2019-06-20 11:14:53 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment nginx-deployment c5d0cf9d-b3b1-4e2a-b321-6148c9266bd6 0xc0014ffd17 0xc0014ffd18}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*20,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,PreemptionPolicy:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:8,FullyLabeledReplicas:8,ObservedGeneration:2,ReadyReplicas:8,AvailableReplicas:8,Conditions:[],},}
+Jun 20 11:14:59.288: INFO: Pod "nginx-deployment-55fb7cb77f-7qkvx" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-55fb7cb77f-7qkvx,GenerateName:nginx-deployment-55fb7cb77f-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-55fb7cb77f-7qkvx,UID:90b1ff4c-eaf3-44d2-b4ae-6316ff1cab7e,ResourceVersion:24409,Generation:0,CreationTimestamp:2019-06-20 11:14:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 55fb7cb77f,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-55fb7cb77f 22859f57-b131-4454-880a-f49cd168c407 0xc002cb2fa7 0xc002cb2fa8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002cb3050} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002cb30d0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC  }],Message:,Reason:,HostIP:10.100.10.111,PodIP:,StartTime:2019-06-20 11:14:57 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.289: INFO: Pod "nginx-deployment-55fb7cb77f-8s2pz" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-55fb7cb77f-8s2pz,GenerateName:nginx-deployment-55fb7cb77f-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-55fb7cb77f-8s2pz,UID:d18f80e2-2ddf-4305-a963-81a9f3c10810,ResourceVersion:24426,Generation:0,CreationTimestamp:2019-06-20 11:14:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 55fb7cb77f,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-55fb7cb77f 22859f57-b131-4454-880a-f49cd168c407 0xc002cb3240 0xc002cb3241}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-12-226.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002cb32d0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002cb32f0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC  }],Message:,Reason:,HostIP:10.100.12.226,PodIP:,StartTime:2019-06-20 11:14:57 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.289: INFO: Pod "nginx-deployment-55fb7cb77f-c8dzz" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-55fb7cb77f-c8dzz,GenerateName:nginx-deployment-55fb7cb77f-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-55fb7cb77f-c8dzz,UID:d0fad6d2-81e7-473a-a3e0-92886093938b,ResourceVersion:24430,Generation:0,CreationTimestamp:2019-06-20 11:14:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 55fb7cb77f,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-55fb7cb77f 22859f57-b131-4454-880a-f49cd168c407 0xc002cb3490 0xc002cb3491}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002cb3540} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002cb3560}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC  }],Message:,Reason:,HostIP:10.100.10.111,PodIP:,StartTime:2019-06-20 11:14:57 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.289: INFO: Pod "nginx-deployment-55fb7cb77f-f2srq" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-55fb7cb77f-f2srq,GenerateName:nginx-deployment-55fb7cb77f-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-55fb7cb77f-f2srq,UID:279ffdde-cc8b-4cef-9989-e60236ccc5d3,ResourceVersion:24448,Generation:0,CreationTimestamp:2019-06-20 11:14:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 55fb7cb77f,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-55fb7cb77f 22859f57-b131-4454-880a-f49cd168c407 0xc002cb3710 0xc002cb3711}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002cb37f0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002cb3810}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Pending,Conditions:[],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.289: INFO: Pod "nginx-deployment-55fb7cb77f-ptk47" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-55fb7cb77f-ptk47,GenerateName:nginx-deployment-55fb7cb77f-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-55fb7cb77f-ptk47,UID:93f4c6f6-3730-48f0-a70f-13586f2aa966,ResourceVersion:24406,Generation:0,CreationTimestamp:2019-06-20 11:14:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 55fb7cb77f,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-55fb7cb77f 22859f57-b131-4454-880a-f49cd168c407 0xc002cb38c7 0xc002cb38c8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-12-226.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002cb3970} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002cb39a0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC  }],Message:,Reason:,HostIP:10.100.12.226,PodIP:,StartTime:2019-06-20 11:14:57 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.289: INFO: Pod "nginx-deployment-55fb7cb77f-vrzkk" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-55fb7cb77f-vrzkk,GenerateName:nginx-deployment-55fb7cb77f-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-55fb7cb77f-vrzkk,UID:17ac1960-b789-4d8d-8f78-43a0169990c0,ResourceVersion:24400,Generation:0,CreationTimestamp:2019-06-20 11:14:57 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 55fb7cb77f,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-55fb7cb77f 22859f57-b131-4454-880a-f49cd168c407 0xc002cb3b50 0xc002cb3b51}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002cb3c10} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002cb3c30}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:57 +0000 UTC  }],Message:,Reason:,HostIP:10.100.10.111,PodIP:,StartTime:2019-06-20 11:14:57 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.290: INFO: Pod "nginx-deployment-7b8c6f4498-6bzrv" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498-6bzrv,GenerateName:nginx-deployment-7b8c6f4498-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-7b8c6f4498-6bzrv,UID:1a61e8d1-45f1-44d8-9b54-1cbe27647277,ResourceVersion:24357,Generation:0,CreationTimestamp:2019-06-20 11:14:53 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-7b8c6f4498 029355d1-b157-4ecd-98b8-494baa43964a 0xc002cb3d50 0xc002cb3d51}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-12-226.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002cb3db0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002cb3dd0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:55 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:55 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  }],Message:,Reason:,HostIP:10.100.12.226,PodIP:10.34.0.5,StartTime:2019-06-20 11:14:53 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-20 11:14:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://35eb3ecf32232fc450eac2badb5cad6cc6f1da8f21648e3e212840baa6d8407e}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.290: INFO: Pod "nginx-deployment-7b8c6f4498-6gfff" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498-6gfff,GenerateName:nginx-deployment-7b8c6f4498-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-7b8c6f4498-6gfff,UID:f05f4bda-3aec-4680-99b1-f9a30f4b9b13,ResourceVersion:24380,Generation:0,CreationTimestamp:2019-06-20 11:14:53 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-7b8c6f4498 029355d1-b157-4ecd-98b8-494baa43964a 0xc002cb3ea0 0xc002cb3ea1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-12-226.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002cb3f00} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002cb3f20}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:56 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:56 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  }],Message:,Reason:,HostIP:10.100.12.226,PodIP:10.34.0.3,StartTime:2019-06-20 11:14:53 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-20 11:14:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://64d177f112288a3c975989d2a9f48fcd95411a8ec490e3b9f3f930ea2e50ec12}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.290: INFO: Pod "nginx-deployment-7b8c6f4498-h294d" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498-h294d,GenerateName:nginx-deployment-7b8c6f4498-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-7b8c6f4498-h294d,UID:cad2a2d7-cd3a-4dba-b0f3-26a411fe5d7d,ResourceVersion:24367,Generation:0,CreationTimestamp:2019-06-20 11:14:53 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-7b8c6f4498 029355d1-b157-4ecd-98b8-494baa43964a 0xc0025540e0 0xc0025540e1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002554250} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002554270}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:56 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:56 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  }],Message:,Reason:,HostIP:10.100.10.111,PodIP:10.38.0.5,StartTime:2019-06-20 11:14:53 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-20 11:14:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://9b2de8b67a1903361e3f5c1309e4fe541b06dfb70bc384a6e11d6468bddd8c4a}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.290: INFO: Pod "nginx-deployment-7b8c6f4498-jhcmm" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498-jhcmm,GenerateName:nginx-deployment-7b8c6f4498-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-7b8c6f4498-jhcmm,UID:73d160b3-6b8a-42aa-8ad8-1805c39c5a2f,ResourceVersion:24353,Generation:0,CreationTimestamp:2019-06-20 11:14:53 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-7b8c6f4498 029355d1-b157-4ecd-98b8-494baa43964a 0xc002554360 0xc002554361}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-12-226.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc0025543c0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc0025543e0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:55 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:55 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  }],Message:,Reason:,HostIP:10.100.12.226,PodIP:10.34.0.6,StartTime:2019-06-20 11:14:53 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-20 11:14:55 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://be887d0e0fcd2a86c05ab1f3ed2beb6dd43fa374ef40e9642b906e77549bd1ea}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.291: INFO: Pod "nginx-deployment-7b8c6f4498-l7hdn" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498-l7hdn,GenerateName:nginx-deployment-7b8c6f4498-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-7b8c6f4498-l7hdn,UID:ae92cd54-4465-46a5-962a-d4586869eae5,ResourceVersion:24337,Generation:0,CreationTimestamp:2019-06-20 11:14:53 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-7b8c6f4498 029355d1-b157-4ecd-98b8-494baa43964a 0xc0025545a0 0xc0025545a1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002554600} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002554620}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:54 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:54 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  }],Message:,Reason:,HostIP:10.100.10.111,PodIP:10.38.0.2,StartTime:2019-06-20 11:14:53 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-20 11:14:54 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://e81523c415fd0f5b3c1ef0345527bb287c12bf537854359c1a89bacc3303169e}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.291: INFO: Pod "nginx-deployment-7b8c6f4498-m542d" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498-m542d,GenerateName:nginx-deployment-7b8c6f4498-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-7b8c6f4498-m542d,UID:ac783104-1def-46fe-9ddb-10d6671fda4e,ResourceVersion:24453,Generation:0,CreationTimestamp:2019-06-20 11:14:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-7b8c6f4498 029355d1-b157-4ecd-98b8-494baa43964a 0xc0025547e0 0xc0025547e1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc0025548e0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002554900}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:59 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.291: INFO: Pod "nginx-deployment-7b8c6f4498-nffdc" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498-nffdc,GenerateName:nginx-deployment-7b8c6f4498-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-7b8c6f4498-nffdc,UID:44bcaa84-9262-42b0-8220-80096dd46d87,ResourceVersion:24350,Generation:0,CreationTimestamp:2019-06-20 11:14:53 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-7b8c6f4498 029355d1-b157-4ecd-98b8-494baa43964a 0xc002554990 0xc002554991}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-12-226.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002554b20} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002554b40}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:55 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:55 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  }],Message:,Reason:,HostIP:10.100.12.226,PodIP:10.34.0.2,StartTime:2019-06-20 11:14:53 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-20 11:14:54 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://b0a80d313dc9648dd5e2506208912dbec31de22c6023596a5d1851e12b692409}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.291: INFO: Pod "nginx-deployment-7b8c6f4498-ngxh4" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498-ngxh4,GenerateName:nginx-deployment-7b8c6f4498-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-7b8c6f4498-ngxh4,UID:488b95dd-2806-4d02-b0a9-8fedc7a5f857,ResourceVersion:24452,Generation:0,CreationTimestamp:2019-06-20 11:14:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-7b8c6f4498 029355d1-b157-4ecd-98b8-494baa43964a 0xc002554d70 0xc002554d71}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002554e10} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002554e30}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Pending,Conditions:[],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.291: INFO: Pod "nginx-deployment-7b8c6f4498-nlmrr" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498-nlmrr,GenerateName:nginx-deployment-7b8c6f4498-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-7b8c6f4498-nlmrr,UID:cfa5cd1b-444e-4a23-83c1-4bd0c244728c,ResourceVersion:24376,Generation:0,CreationTimestamp:2019-06-20 11:14:53 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-7b8c6f4498 029355d1-b157-4ecd-98b8-494baa43964a 0xc002554f67 0xc002554f68}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-10-111.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002554fd0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002554ff0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:56 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:56 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  }],Message:,Reason:,HostIP:10.100.10.111,PodIP:10.38.0.4,StartTime:2019-06-20 11:14:53 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-20 11:14:54 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://f76c404a37acd073cb918c61e3b81e45351d1f4be9e8a0aefcb2587b8d52b4d3}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.292: INFO: Pod "nginx-deployment-7b8c6f4498-nm2j4" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498-nm2j4,GenerateName:nginx-deployment-7b8c6f4498-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-7b8c6f4498-nm2j4,UID:dc9823e5-8a71-4ea1-b80b-d368bb77bcd9,ResourceVersion:24451,Generation:0,CreationTimestamp:2019-06-20 11:14:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-7b8c6f4498 029355d1-b157-4ecd-98b8-494baa43964a 0xc0025550d0 0xc0025550d1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002555140} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002555160}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Pending,Conditions:[],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 20 11:14:59.292: INFO: Pod "nginx-deployment-7b8c6f4498-phhcc" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-7b8c6f4498-phhcc,GenerateName:nginx-deployment-7b8c6f4498-,Namespace:deployment-1133,SelfLink:/api/v1/namespaces/deployment-1133/pods/nginx-deployment-7b8c6f4498-phhcc,UID:73ce9705-a1ba-4441-b0ac-44b258597983,ResourceVersion:24360,Generation:0,CreationTimestamp:2019-06-20 11:14:53 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 7b8c6f4498,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-7b8c6f4498 029355d1-b157-4ecd-98b8-494baa43964a 0xc0025551c7 0xc0025551c8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-pq984 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pq984,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pq984 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,} false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-100-12-226.eu-west-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],WindowsOptions:nil,},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002555230} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002555250}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:55 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:55 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-20 11:14:53 +0000 UTC  }],Message:,Reason:,HostIP:10.100.12.226,PodIP:10.34.0.4,StartTime:2019-06-20 11:14:53 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-20 11:14:54 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://6a8abde4fbc3396b81482b5304b0616e8cae308ed3934a9eb7ea0b644d966be7}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:14:59.292: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-1133" for this suite.
+Jun 20 11:15:07.334: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:15:07.415: INFO: namespace deployment-1133 deletion completed in 8.111177543s
+
+• [SLOW TEST:14.258 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+S
+------------------------------
+[sig-storage] EmptyDir wrapper volumes 
+  should not cause race condition when used for configmaps [Serial] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:15:07.415: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir-wrapper
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not cause race condition when used for configmaps [Serial] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating 50 configmaps
+STEP: Creating RC which spawns configmap-volume pods
+Jun 20 11:15:07.676: INFO: Pod name wrapped-volume-race-2a43d899-5668-4539-9cee-dd9da0937d00: Found 1 pods out of 5
+Jun 20 11:15:12.682: INFO: Pod name wrapped-volume-race-2a43d899-5668-4539-9cee-dd9da0937d00: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-2a43d899-5668-4539-9cee-dd9da0937d00 in namespace emptydir-wrapper-7182, will wait for the garbage collector to delete the pods
+Jun 20 11:15:22.762: INFO: Deleting ReplicationController wrapped-volume-race-2a43d899-5668-4539-9cee-dd9da0937d00 took: 8.824422ms
+Jun 20 11:15:23.062: INFO: Terminating ReplicationController wrapped-volume-race-2a43d899-5668-4539-9cee-dd9da0937d00 pods took: 300.213059ms
+STEP: Creating RC which spawns configmap-volume pods
+Jun 20 11:15:58.478: INFO: Pod name wrapped-volume-race-eb9fc748-51c8-4d1e-a41d-621efba2edd2: Found 0 pods out of 5
+Jun 20 11:16:03.484: INFO: Pod name wrapped-volume-race-eb9fc748-51c8-4d1e-a41d-621efba2edd2: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-eb9fc748-51c8-4d1e-a41d-621efba2edd2 in namespace emptydir-wrapper-7182, will wait for the garbage collector to delete the pods
+Jun 20 11:16:13.563: INFO: Deleting ReplicationController wrapped-volume-race-eb9fc748-51c8-4d1e-a41d-621efba2edd2 took: 7.652955ms
+Jun 20 11:16:13.863: INFO: Terminating ReplicationController wrapped-volume-race-eb9fc748-51c8-4d1e-a41d-621efba2edd2 pods took: 300.198857ms
+STEP: Creating RC which spawns configmap-volume pods
+Jun 20 11:16:50.179: INFO: Pod name wrapped-volume-race-27263d7b-92d8-4db3-9224-89529a55624c: Found 0 pods out of 5
+Jun 20 11:16:55.187: INFO: Pod name wrapped-volume-race-27263d7b-92d8-4db3-9224-89529a55624c: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-27263d7b-92d8-4db3-9224-89529a55624c in namespace emptydir-wrapper-7182, will wait for the garbage collector to delete the pods
+Jun 20 11:17:07.297: INFO: Deleting ReplicationController wrapped-volume-race-27263d7b-92d8-4db3-9224-89529a55624c took: 7.493311ms
+Jun 20 11:17:07.597: INFO: Terminating ReplicationController wrapped-volume-race-27263d7b-92d8-4db3-9224-89529a55624c pods took: 300.309654ms
+STEP: Cleaning up the configMaps
+[AfterEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:17:47.480: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-wrapper-7182" for this suite.
+Jun 20 11:17:55.495: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:17:55.563: INFO: namespace emptydir-wrapper-7182 deletion completed in 8.07979973s
+
+• [SLOW TEST:168.148 seconds]
+[sig-storage] EmptyDir wrapper volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  should not cause race condition when used for configmaps [Serial] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
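+
+The race spec above stamps out ReplicationController pods that mount many ConfigMap volumes at once; the kubelet wraps each in an emptyDir, which historically raced on concurrent mounts. A single-volume sketch of the pod shape, with hypothetical names (the suite's pods each mount the 50 ConfigMaps created above):
+
+kubectl create configmap race-cm-0 --from-literal=data-1=value-1
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: wrapped-volume-race-example
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test-container
+    image: busybox
+    command: ["sleep", "10000"]
+    volumeMounts:
+    - name: racey-configmap-0
+      mountPath: /etc/config-0
+  volumes:
+  - name: racey-configmap-0
+    configMap:
+      name: race-cm-0
+EOF
+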
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:17:55.563: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: create the deployment
+STEP: Wait for the Deployment to create new ReplicaSet
+STEP: delete the deployment
+STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the rs
+STEP: Gathering metrics
+Jun 20 11:18:26.131: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+W0620 11:18:26.131031      15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:18:26.131: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-5193" for this suite.
+Jun 20 11:18:32.146: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:18:32.210: INFO: namespace gc-5193 deletion completed in 6.07536446s
+
+• [SLOW TEST:36.646 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
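+
+The spec above deletes a Deployment with deleteOptions.propagationPolicy=Orphan and then watches for 30 seconds to confirm the garbage collector leaves the ReplicaSet alone. A sketch of both ways to request orphaning (resource names are illustrative; --cascade=false is the v1.15-era kubectl spelling, later renamed --cascade=orphan):
+
+kubectl delete deployment nginx --cascade=false
+# Raw API equivalent, setting deleteOptions.propagationPolicy explicitly:
+kubectl proxy --port=8001 &
+curl -X DELETE -H 'Content-Type: application/json' \
+  -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Orphan"}' \
+  http://localhost:8001/apis/apps/v1/namespaces/default/deployments/nginx
+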
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:18:32.211: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name cm-test-opt-del-de13e583-b4cc-4dc7-bd66-d5a3d8b92cb0
+STEP: Creating configMap with name cm-test-opt-upd-ba375a5c-46dd-448b-8020-c40e12f99e55
+STEP: Creating the pod
+STEP: Deleting configmap cm-test-opt-del-de13e583-b4cc-4dc7-bd66-d5a3d8b92cb0
+STEP: Updating configmap cm-test-opt-upd-ba375a5c-46dd-448b-8020-c40e12f99e55
+STEP: Creating configMap with name cm-test-opt-create-8e6f5e48-97f1-4f27-8b16-ee0bee25a451
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:20:02.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-3924" for this suite.
+Jun 20 11:20:24.700: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:20:24.814: INFO: namespace projected-3924 deletion completed in 22.125240977s
+
+• [SLOW TEST:112.603 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
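+
+This spec relies on optional ConfigMap sources in a projected volume: the pod starts even though a referenced ConfigMap does not exist yet, deleting an optional source removes its files, and creating one surfaces its keys, all without a restart. A minimal sketch under assumed names:
+
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: optional-cm-example
+spec:
+  containers:
+  - name: view
+    image: busybox
+    command: ["sh", "-c", "sleep 36000"]
+    volumeMounts:
+    - name: maybe-config
+      mountPath: /etc/maybe-config
+  volumes:
+  - name: maybe-config
+    projected:
+      sources:
+      - configMap:
+          name: cm-created-later
+          optional: true
+EOF
+kubectl create configmap cm-created-later --from-literal=data-1=value-1   # keys appear under the mount shortly after
+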
+SSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:20:24.814: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name projected-configmap-test-volume-9bd75b5f-6ec8-4a58-af22-003989a88759
+STEP: Creating a pod to test consume configMaps
+Jun 20 11:20:24.862: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-17377aaa-ece2-48f7-aed4-3c2b9300f77a" in namespace "projected-2013" to be "success or failure"
+Jun 20 11:20:24.866: INFO: Pod "pod-projected-configmaps-17377aaa-ece2-48f7-aed4-3c2b9300f77a": Phase="Pending", Reason="", readiness=false. Elapsed: 3.650661ms
+Jun 20 11:20:26.869: INFO: Pod "pod-projected-configmaps-17377aaa-ece2-48f7-aed4-3c2b9300f77a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006946583s
+STEP: Saw pod success
+Jun 20 11:20:26.869: INFO: Pod "pod-projected-configmaps-17377aaa-ece2-48f7-aed4-3c2b9300f77a" satisfied condition "success or failure"
+Jun 20 11:20:26.872: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-configmaps-17377aaa-ece2-48f7-aed4-3c2b9300f77a container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 20 11:20:26.888: INFO: Waiting for pod pod-projected-configmaps-17377aaa-ece2-48f7-aed4-3c2b9300f77a to disappear
+Jun 20 11:20:26.890: INFO: Pod pod-projected-configmaps-17377aaa-ece2-48f7-aed4-3c2b9300f77a no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:20:26.890: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-2013" for this suite.
+Jun 20 11:20:32.903: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:20:32.969: INFO: namespace projected-2013 deletion completed in 6.075344198s
+
+• [SLOW TEST:8.155 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  Should recreate evicted statefulset [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:20:32.969: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename statefulset
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:60
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:75
+STEP: Creating service test in namespace statefulset-785
+[It] Should recreate evicted statefulset [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Looking for a node to schedule stateful set and pod
+STEP: Creating pod with conflicting port in namespace statefulset-785
+STEP: Creating statefulset with conflicting port in namespace statefulset-785
+STEP: Waiting until pod test-pod starts running in namespace statefulset-785
+STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace statefulset-785
+Jun 20 11:20:37.033: INFO: Observed stateful pod in namespace: statefulset-785, name: ss-0, uid: 2199106a-34f0-4711-965b-db23d5f6c533, status phase: Pending. Waiting for statefulset controller to delete.
+Jun 20 11:20:37.135: INFO: Observed stateful pod in namespace: statefulset-785, name: ss-0, uid: 2199106a-34f0-4711-965b-db23d5f6c533, status phase: Failed. Waiting for statefulset controller to delete.
+Jun 20 11:20:37.141: INFO: Observed stateful pod in namespace: statefulset-785, name: ss-0, uid: 2199106a-34f0-4711-965b-db23d5f6c533, status phase: Failed. Waiting for statefulset controller to delete.
+Jun 20 11:20:37.148: INFO: Observed delete event for stateful pod ss-0 in namespace statefulset-785
+STEP: Removing pod with conflicting port in namespace statefulset-785
+STEP: Waiting until stateful pod ss-0 is recreated in namespace statefulset-785 and is in running state
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:86
+Jun 20 11:20:49.196: INFO: Deleting all statefulset in ns statefulset-785
+Jun 20 11:20:49.199: INFO: Scaling statefulset ss to 0
+Jun 20 11:20:59.211: INFO: Waiting for statefulset status.replicas updated to 0
+Jun 20 11:20:59.213: INFO: Deleting statefulset ss
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:20:59.223: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-785" for this suite.
+Jun 20 11:21:05.237: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:21:05.338: INFO: namespace statefulset-785 deletion completed in 6.111534865s
+
+• [SLOW TEST:32.369 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    Should recreate evicted statefulset [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
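+
+The spec above deliberately occupies a node port so the stateful pod fails, then verifies the StatefulSet controller keeps recreating ss-0 under the same identity. The recreation guarantee is easy to observe by hand (names are illustrative; any running StatefulSet will do):
+
+kubectl delete pod ss-0
+kubectl get pods -w   # ss-0 returns with the same name and ordinal, unlike a Deployment's randomized pod names
+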
+SSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:21:05.338: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename statefulset
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:60
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:75
+STEP: Creating service test in namespace statefulset-1395
+[It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Initializing watcher for selector baz=blah,foo=bar
+STEP: Creating stateful set ss in namespace statefulset-1395
+STEP: Waiting until all stateful set ss replicas are running in namespace statefulset-1395
+Jun 20 11:21:05.390: INFO: Found 0 stateful pods, waiting for 1
+Jun 20 11:21:15.393: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod
+Jun 20 11:21:15.396: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-1395 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 20 11:21:15.648: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun 20 11:21:15.648: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 20 11:21:15.648: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun 20 11:21:15.651: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true
+Jun 20 11:21:25.654: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+Jun 20 11:21:25.654: INFO: Waiting for statefulset status.replicas updated to 0
+Jun 20 11:21:25.669: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999751s
+Jun 20 11:21:26.673: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.995698047s
+Jun 20 11:21:27.677: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.99177553s
+Jun 20 11:21:28.681: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.987538244s
+Jun 20 11:21:29.685: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.984180356s
+Jun 20 11:21:30.689: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.980156606s
+Jun 20 11:21:31.692: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.976329473s
+Jun 20 11:21:32.696: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.972667417s
+Jun 20 11:21:33.699: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.968819641s
+Jun 20 11:21:34.703: INFO: Verifying statefulset ss doesn't scale past 1 for another 965.378474ms
+STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them are running in namespace statefulset-1395
+Jun 20 11:21:35.706: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-1395 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun 20 11:21:35.873: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun 20 11:21:35.873: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun 20 11:21:35.873: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun 20 11:21:35.878: INFO: Found 1 stateful pods, waiting for 3
+Jun 20 11:21:45.882: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+Jun 20 11:21:45.882: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true
+Jun 20 11:21:45.882: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Verifying that stateful set ss was scaled up in order
+STEP: Scale down will halt with unhealthy stateful pod
+Jun 20 11:21:45.886: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-1395 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 20 11:21:46.057: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun 20 11:21:46.057: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 20 11:21:46.057: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun 20 11:21:46.057: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-1395 ss-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 20 11:21:46.218: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun 20 11:21:46.218: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 20 11:21:46.218: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun 20 11:21:46.218: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-1395 ss-2 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun 20 11:21:46.440: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun 20 11:21:46.440: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun 20 11:21:46.440: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun 20 11:21:46.440: INFO: Waiting for statefulset status.replicas updated to 0
+Jun 20 11:21:46.443: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 3
+Jun 20 11:21:56.449: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+Jun 20 11:21:56.449: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false
+Jun 20 11:21:56.449: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false
+Jun 20 11:21:56.458: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.999999825s
+Jun 20 11:21:57.462: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.997072588s
+Jun 20 11:21:58.466: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.993217971s
+Jun 20 11:21:59.469: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.989288088s
+Jun 20 11:22:00.473: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.98566879s
+Jun 20 11:22:01.477: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.981959212s
+Jun 20 11:22:02.481: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.97827107s
+Jun 20 11:22:03.484: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.974072908s
+Jun 20 11:22:04.488: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.970834168s
+Jun 20 11:22:05.492: INFO: Verifying statefulset ss doesn't scale past 3 for another 967.136668ms
+STEP: Scaling down stateful set ss to 0 replicas and waiting until none of the pods are running in namespace statefulset-1395
+Jun 20 11:22:06.496: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-1395 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun 20 11:22:06.657: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun 20 11:22:06.657: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun 20 11:22:06.657: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun 20 11:22:06.657: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-1395 ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun 20 11:22:06.833: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun 20 11:22:06.833: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun 20 11:22:06.833: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun 20 11:22:06.833: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 exec --namespace=statefulset-1395 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun 20 11:22:07.020: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun 20 11:22:07.020: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun 20 11:22:07.020: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-2: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun 20 11:22:07.020: INFO: Scaling statefulset ss to 0
+STEP: Verifying that stateful set ss was scaled down in reverse order
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:86
+Jun 20 11:22:17.033: INFO: Deleting all statefulset in ns statefulset-1395
+Jun 20 11:22:17.035: INFO: Scaling statefulset ss to 0
+Jun 20 11:22:17.041: INFO: Waiting for statefulset status.replicas updated to 0
+Jun 20 11:22:17.043: INFO: Deleting statefulset ss
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:22:17.053: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-1395" for this suite.
+Jun 20 11:22:23.065: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:22:23.130: INFO: namespace statefulset-1395 deletion completed in 6.074547062s
+
+• [SLOW TEST:77.792 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
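+
+The mv commands logged above toggle nginx's readiness file: with the default OrderedReady pod management policy, a StatefulSet halts scaling while any pod is unready and otherwise proceeds in strict ordinal order up and reverse order down. The core of the check, condensed (namespace flags omitted):
+
+kubectl exec ss-0 -- mv /usr/share/nginx/html/index.html /tmp/   # readiness probe starts failing
+kubectl scale statefulset ss --replicas=3                        # ss-1 and ss-2 are held back
+kubectl exec ss-0 -- mv /tmp/index.html /usr/share/nginx/html/   # ss-0 Ready again; scale-up resumes in order
+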
+SSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:22:23.130: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name projected-configmap-test-volume-50515079-e4bf-4954-888f-51d436083356
+STEP: Creating a pod to test consume configMaps
+Jun 20 11:22:23.173: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-f347b6a7-31ed-4687-832f-aaca1b6070fd" in namespace "projected-6221" to be "success or failure"
+Jun 20 11:22:23.176: INFO: Pod "pod-projected-configmaps-f347b6a7-31ed-4687-832f-aaca1b6070fd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.615508ms
+Jun 20 11:22:25.179: INFO: Pod "pod-projected-configmaps-f347b6a7-31ed-4687-832f-aaca1b6070fd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.005563687s
+Jun 20 11:22:27.182: INFO: Pod "pod-projected-configmaps-f347b6a7-31ed-4687-832f-aaca1b6070fd": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009023883s
+STEP: Saw pod success
+Jun 20 11:22:27.182: INFO: Pod "pod-projected-configmaps-f347b6a7-31ed-4687-832f-aaca1b6070fd" satisfied condition "success or failure"
+Jun 20 11:22:27.185: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-configmaps-f347b6a7-31ed-4687-832f-aaca1b6070fd container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 20 11:22:27.201: INFO: Waiting for pod pod-projected-configmaps-f347b6a7-31ed-4687-832f-aaca1b6070fd to disappear
+Jun 20 11:22:27.204: INFO: Pod pod-projected-configmaps-f347b6a7-31ed-4687-832f-aaca1b6070fd no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:22:27.204: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-6221" for this suite.
+Jun 20 11:22:33.218: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:22:33.283: INFO: namespace projected-6221 deletion completed in 6.075704002s
+
+• [SLOW TEST:10.153 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSS
+------------------------------
+[sig-storage] EmptyDir wrapper volumes 
+  should not conflict [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:22:33.283: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir-wrapper
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not conflict [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Cleaning up the secret
+STEP: Cleaning up the configmap
+STEP: Cleaning up the pod
+[AfterEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:22:37.362: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-wrapper-6551" for this suite.
+Jun 20 11:22:43.376: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:22:43.441: INFO: namespace emptydir-wrapper-6551 deletion completed in 6.074430275s
+
+• [SLOW TEST:10.158 seconds]
+[sig-storage] EmptyDir wrapper volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  should not conflict [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class 
+  should be submitted and removed  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] [sig-node] Pods Extended
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:22:43.441: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods Set QOS Class
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pods.go:179
+[It] should be submitted and removed  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying QOS class is set on the pod
+[AfterEach] [k8s.io] [sig-node] Pods Extended
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:22:43.480: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-8188" for this suite.
+Jun 20 11:23:05.496: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:23:05.566: INFO: namespace pods-8188 deletion completed in 22.079883561s
+
+• [SLOW TEST:22.125 seconds]
+[k8s.io] [sig-node] Pods Extended
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  [k8s.io] Pods Set QOS Class
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should be submitted and removed  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
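+
+The QOS spec submits a pod and verifies the API server has populated status.qosClass from its resource stanza: requests equal to limits for every container yields Guaranteed, requests below limits yields Burstable, and no resources at all yields BestEffort. A hand-run sketch using v1.15-era kubectl run flags (removed in later releases):
+
+kubectl run qos-demo --image=nginx:1.14-alpine --restart=Never \
+  --requests=cpu=100m,memory=100Mi --limits=cpu=100m,memory=100Mi
+kubectl get pod qos-demo -o jsonpath='{.status.qosClass}'   # Guaranteed
+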
+SSSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod 
+  should be possible to delete [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:23:05.566: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[BeforeEach] when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81
+[It] should be possible to delete [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:23:05.629: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-9559" for this suite.
+Jun 20 11:23:11.642: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:23:11.729: INFO: namespace kubelet-test-9559 deletion completed in 6.096563989s
+
+• [SLOW TEST:6.163 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78
+    should be possible to delete [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:23:11.729: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:24:11.771: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-134" for this suite.
+Jun 20 11:24:33.784: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:24:33.883: INFO: namespace container-probe-134 deletion completed in 22.109067224s
+
+• [SLOW TEST:82.155 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
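+
+A readiness probe that always fails leaves a pod Running but permanently unready; unlike a liveness failure it never triggers a restart, which is what this spec asserts by polling for a minute. A sketch with an always-failing exec probe (names are illustrative):
+
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: never-ready-example
+spec:
+  containers:
+  - name: probe-test
+    image: busybox
+    command: ["sh", "-c", "sleep 36000"]
+    readinessProbe:
+      exec:
+        command: ["/bin/false"]
+      initialDelaySeconds: 5
+      periodSeconds: 5
+EOF
+kubectl get pod never-ready-example   # stays READY 0/1 with RESTARTS 0
+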
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl rolling-update 
+  should support rolling-update to same image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:24:33.885: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[BeforeEach] [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1517
+[It] should support rolling-update to same image  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun 20 11:24:33.916: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=kubectl-4453'
+Jun 20 11:24:34.162: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+Jun 20 11:24:34.162: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n"
+STEP: verifying the rc e2e-test-nginx-rc was created
+Jun 20 11:24:34.168: INFO: Waiting for rc e2e-test-nginx-rc to stabilize, generation 1 observed generation 0 spec.replicas 1 status.replicas 0
+Jun 20 11:24:34.169: INFO: Waiting for rc e2e-test-nginx-rc to stabilize, generation 1 observed generation 1 spec.replicas 1 status.replicas 0
+STEP: rolling-update to same image controller
+Jun 20 11:24:34.176: INFO: scanned /root for discovery docs: 
+Jun 20 11:24:34.176: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 rolling-update e2e-test-nginx-rc --update-period=1s --image=docker.io/library/nginx:1.14-alpine --image-pull-policy=IfNotPresent --namespace=kubectl-4453'
+Jun 20 11:24:49.945: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n"
+Jun 20 11:24:49.945: INFO: stdout: "Created e2e-test-nginx-rc-44d0e2b41dfce81aadd53ebc8abb1c22\nScaling up e2e-test-nginx-rc-44d0e2b41dfce81aadd53ebc8abb1c22 from 0 to 1, scaling down e2e-test-nginx-rc from 1 to 0 (keep 1 pods available, don't exceed 2 pods)\nScaling e2e-test-nginx-rc-44d0e2b41dfce81aadd53ebc8abb1c22 up to 1\nScaling e2e-test-nginx-rc down to 0\nUpdate succeeded. Deleting old controller: e2e-test-nginx-rc\nRenaming e2e-test-nginx-rc-44d0e2b41dfce81aadd53ebc8abb1c22 to e2e-test-nginx-rc\nreplicationcontroller/e2e-test-nginx-rc rolling updated\n"
+STEP: waiting for all containers in run=e2e-test-nginx-rc pods to come up.
+Jun 20 11:24:49.945: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l run=e2e-test-nginx-rc --namespace=kubectl-4453'
+Jun 20 11:24:50.022: INFO: stderr: ""
+Jun 20 11:24:50.022: INFO: stdout: "e2e-test-nginx-rc-44d0e2b41dfce81aadd53ebc8abb1c22-hbblx "
+Jun 20 11:24:50.022: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods e2e-test-nginx-rc-44d0e2b41dfce81aadd53ebc8abb1c22-hbblx -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "e2e-test-nginx-rc") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-4453'
+Jun 20 11:24:50.087: INFO: stderr: ""
+Jun 20 11:24:50.087: INFO: stdout: "true"
+Jun 20 11:24:50.087: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 get pods e2e-test-nginx-rc-44d0e2b41dfce81aadd53ebc8abb1c22-hbblx -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "e2e-test-nginx-rc"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-4453'
+Jun 20 11:24:50.151: INFO: stderr: ""
+Jun 20 11:24:50.151: INFO: stdout: "docker.io/library/nginx:1.14-alpine"
+Jun 20 11:24:50.151: INFO: e2e-test-nginx-rc-44d0e2b41dfce81aadd53ebc8abb1c22-hbblx is verified up and running
+[AfterEach] [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1523
+Jun 20 11:24:50.152: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete rc e2e-test-nginx-rc --namespace=kubectl-4453'
+Jun 20 11:24:50.224: INFO: stderr: ""
+Jun 20 11:24:50.224: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:24:50.224: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-4453" for this suite.
+Jun 20 11:25:12.240: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:25:12.307: INFO: namespace kubectl-4453 deletion completed in 22.079512526s
+
+• [SLOW TEST:38.422 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should support rolling-update to same image  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
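+
+kubectl rolling-update, twice flagged as deprecated above, operated on bare ReplicationControllers; the replacement flow drives the same image swap through a Deployment rollout. An illustrative modern equivalent (note that setting an unchanged image on a Deployment is a no-op rather than a forced replacement):
+
+kubectl create deployment e2e-test-nginx --image=docker.io/library/nginx:1.14-alpine
+kubectl set image deployment/e2e-test-nginx nginx=docker.io/library/nginx:1.14-alpine   # unchanged spec, no new ReplicaSet
+kubectl rollout status deployment/e2e-test-nginx
+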
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:25:12.308: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Performing setup for networking test in namespace pod-network-test-346
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun 20 11:25:12.339: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun 20 11:25:34.402: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.38.0.2:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-346 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 11:25:34.402: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 11:25:34.508: INFO: Found all expected endpoints: [netserver-0]
+Jun 20 11:25:34.511: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.34.0.2:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-346 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun 20 11:25:34.511: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+Jun 20 11:25:34.609: INFO: Found all expected endpoints: [netserver-1]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:25:34.609: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-346" for this suite.
+Jun 20 11:25:56.624: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:25:56.688: INFO: namespace pod-network-test-346 deletion completed in 22.075224625s
+
+• [SLOW TEST:44.380 seconds]
+[sig-network] Networking
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
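+
+The ExecWithOptions entries above are how the suite verifies node-to-pod HTTP reachability: a helper pod on the host network curls each netserver pod's /hostName endpoint on port 8080 and checks that every expected backend answers. The same probe can be replayed by hand (the pod name, namespace, and IP below are this run's and will differ elsewhere):
+
+kubectl exec host-test-container-pod --namespace=pod-network-test-346 -- \
+  /bin/sh -c "curl -g -q -s --max-time 15 --connect-timeout 1 http://10.38.0.2:8080/hostName"
+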
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-node] ConfigMap 
+  should fail to create ConfigMap with empty key [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-node] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:25:56.689: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should fail to create ConfigMap with empty key [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap that has name configmap-test-emptyKey-58b2c54e-1df0-45b0-8dfa-823b7394d981
+[AfterEach] [sig-node] ConfigMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:25:56.734: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-352" for this suite.
+Jun 20 11:26:02.746: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:26:02.813: INFO: namespace configmap-352 deletion completed in 6.076158623s
+
+• [SLOW TEST:6.125 seconds]
+[sig-node] ConfigMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:31
+  should fail to create ConfigMap with empty key [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
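+
+This spec expects the API server's validation to reject a ConfigMap whose data map contains an empty key. A sketch that reproduces the failure (the name is illustrative):
+
+cat <<'EOF' | kubectl create -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: configmap-test-emptykey
+data:
+  "": should-fail
+EOF
+# expected: kubectl exits non-zero with a validation error naming the empty data key
+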
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Proxy version v1 
+  should proxy logs on node using proxy subresource  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] version v1
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:26:02.814: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename proxy
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should proxy logs on node using proxy subresource  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+Jun 20 11:26:02.864: INFO: (0) /api/v1/nodes/ip-10-100-10-111.eu-west-1.compute.internal/proxy/logs/: 
+amazon/
+apt/
+auth.log
+[the same node-log directory listing (amazon/, apt/, auth.log) was returned for the remaining 19 proxy requests; the tail of this spec's output and the header of the following spec were truncated in the source log]
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+>>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating secret with name secret-test-8d446103-5b21-48d3-a3c7-4647f9915ee7
+STEP: Creating a pod to test consume secrets
+Jun 20 11:26:09.045: INFO: Waiting up to 5m0s for pod "pod-secrets-e11f20d9-3495-4a04-a696-517179aa47fd" in namespace "secrets-4845" to be "success or failure"
+Jun 20 11:26:09.052: INFO: Pod "pod-secrets-e11f20d9-3495-4a04-a696-517179aa47fd": Phase="Pending", Reason="", readiness=false. Elapsed: 7.096155ms
+Jun 20 11:26:11.056: INFO: Pod "pod-secrets-e11f20d9-3495-4a04-a696-517179aa47fd": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.010450443s
+STEP: Saw pod success
+Jun 20 11:26:11.056: INFO: Pod "pod-secrets-e11f20d9-3495-4a04-a696-517179aa47fd" satisfied condition "success or failure"
+Jun 20 11:26:11.058: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-secrets-e11f20d9-3495-4a04-a696-517179aa47fd container secret-volume-test: 
+STEP: delete the pod
+Jun 20 11:26:11.076: INFO: Waiting for pod pod-secrets-e11f20d9-3495-4a04-a696-517179aa47fd to disappear
+Jun 20 11:26:11.078: INFO: Pod pod-secrets-e11f20d9-3495-4a04-a696-517179aa47fd no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:26:11.078: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-4845" for this suite.
+Jun 20 11:26:17.091: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:26:17.159: INFO: namespace secrets-4845 deletion completed in 6.077409975s
+
+• [SLOW TEST:8.157 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
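+For context: the spec above mounts a Secret volume into a pod running as a non-root user, with defaultMode and fsGroup set, and verifies the resulting file mode and ownership. A minimal sketch of such a pod (image, args, and names are assumptions, not the exact objects the suite creates):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-secrets-example          # illustrative
+spec:
+  securityContext:
+    runAsUser: 1000                  # non-root
+    fsGroup: 1001                    # group ownership applied to the volume
+  containers:
+  - name: secret-volume-test
+    image: gcr.io/kubernetes-e2e-test-images/mounttest:1.0   # assumed helper image
+    args: ["--file_mode=/etc/secret-volume/data-1"]
+    volumeMounts:
+    - name: secret-volume
+      mountPath: /etc/secret-volume
+  volumes:
+  - name: secret-volume
+    secret:
+      secretName: secret-test-example   # illustrative; must exist in the namespace
+      defaultMode: 0440                 # file mode applied to projected keys
+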
+SSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:26:17.159: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating pod test-webserver-37c94d39-89f1-48ad-ace6-656ca47d4ba0 in namespace container-probe-7671
+Jun 20 11:26:19.205: INFO: Started pod test-webserver-37c94d39-89f1-48ad-ace6-656ca47d4ba0 in namespace container-probe-7671
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun 20 11:26:19.208: INFO: Initial restart count of pod test-webserver-37c94d39-89f1-48ad-ace6-656ca47d4ba0 is 0
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:30:19.661: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-7671" for this suite.
+Jun 20 11:30:25.683: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:30:25.748: INFO: namespace container-probe-7671 deletion completed in 6.076538964s
+
+• [SLOW TEST:248.589 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
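+For context: the spec above runs a pod with an HTTP liveness probe against an always-healthy endpoint and verifies that restartCount stays at 0 for the observation window. A minimal sketch of a pod of that shape (image, probe path, and thresholds are assumptions):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-webserver-example       # illustrative
+spec:
+  containers:
+  - name: test-webserver
+    image: gcr.io/kubernetes-e2e-test-images/test-webserver:1.0   # assumed image
+    ports:
+    - containerPort: 80
+    livenessProbe:
+      httpGet:
+        path: /                      # endpoint that always returns 200
+        port: 80
+      initialDelaySeconds: 15
+      failureThreshold: 3            # probe keeps passing, so no restarts occur
+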
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:30:25.748: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward API volume plugin
+Jun 20 11:30:25.790: INFO: Waiting up to 5m0s for pod "downwardapi-volume-1fa2e4e9-da40-4b04-ba3c-c4a447066299" in namespace "downward-api-2480" to be "success or failure"
+Jun 20 11:30:25.793: INFO: Pod "downwardapi-volume-1fa2e4e9-da40-4b04-ba3c-c4a447066299": Phase="Pending", Reason="", readiness=false. Elapsed: 3.308293ms
+Jun 20 11:30:27.797: INFO: Pod "downwardapi-volume-1fa2e4e9-da40-4b04-ba3c-c4a447066299": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007055992s
+STEP: Saw pod success
+Jun 20 11:30:27.797: INFO: Pod "downwardapi-volume-1fa2e4e9-da40-4b04-ba3c-c4a447066299" satisfied condition "success or failure"
+Jun 20 11:30:27.800: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downwardapi-volume-1fa2e4e9-da40-4b04-ba3c-c4a447066299 container client-container: 
+STEP: delete the pod
+Jun 20 11:30:27.819: INFO: Waiting for pod downwardapi-volume-1fa2e4e9-da40-4b04-ba3c-c4a447066299 to disappear
+Jun 20 11:30:27.821: INFO: Pod downwardapi-volume-1fa2e4e9-da40-4b04-ba3c-c4a447066299 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:30:27.822: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-2480" for this suite.
+Jun 20 11:30:33.834: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:30:33.904: INFO: namespace downward-api-2480 deletion completed in 6.079485887s
+
+• [SLOW TEST:8.156 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
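+For context: the spec above projects the pod's own name into a file through a downwardAPI volume and checks the file content. A minimal sketch (image and args are assumptions):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downwardapi-volume-example   # illustrative
+spec:
+  containers:
+  - name: client-container
+    image: gcr.io/kubernetes-e2e-test-images/mounttest:1.0   # assumed helper image
+    args: ["--file_content=/etc/podinfo/podname"]
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    downwardAPI:
+      items:
+      - path: podname
+        fieldRef:
+          fieldPath: metadata.name   # resolved to the pod's name at mount time
+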
+[sig-storage] EmptyDir volumes 
+  volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:30:33.904: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test emptydir volume type on node default medium
+Jun 20 11:30:33.942: INFO: Waiting up to 5m0s for pod "pod-cef6bd5a-90e1-4ae4-90d0-9eeb154974ad" in namespace "emptydir-3547" to be "success or failure"
+Jun 20 11:30:33.945: INFO: Pod "pod-cef6bd5a-90e1-4ae4-90d0-9eeb154974ad": Phase="Pending", Reason="", readiness=false. Elapsed: 3.335496ms
+Jun 20 11:30:35.948: INFO: Pod "pod-cef6bd5a-90e1-4ae4-90d0-9eeb154974ad": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006538739s
+STEP: Saw pod success
+Jun 20 11:30:35.948: INFO: Pod "pod-cef6bd5a-90e1-4ae4-90d0-9eeb154974ad" satisfied condition "success or failure"
+Jun 20 11:30:35.951: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-cef6bd5a-90e1-4ae4-90d0-9eeb154974ad container test-container: 
+STEP: delete the pod
+Jun 20 11:30:35.969: INFO: Waiting for pod pod-cef6bd5a-90e1-4ae4-90d0-9eeb154974ad to disappear
+Jun 20 11:30:35.972: INFO: Pod pod-cef6bd5a-90e1-4ae4-90d0-9eeb154974ad no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:30:35.972: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-3547" for this suite.
+Jun 20 11:30:41.985: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:30:42.078: INFO: namespace emptydir-3547 deletion completed in 6.10325931s
+
+• [SLOW TEST:8.174 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
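+For context: the spec above creates a pod with an emptyDir volume on the default medium and verifies the volume's filesystem type and default 0777 mode. A minimal sketch (image and args are assumptions):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-emptydir-example         # illustrative
+spec:
+  containers:
+  - name: test-container
+    image: gcr.io/kubernetes-e2e-test-images/mounttest:1.0   # assumed helper image
+    args: ["--fs_type=/test-volume", "--file_perm=/test-volume"]
+    volumeMounts:
+    - name: test-volume
+      mountPath: /test-volume
+  volumes:
+  - name: test-volume
+    emptyDir: {}                     # default medium: backed by node storage
+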
+SSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:30:42.078: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:164
+[It] should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating the pod
+STEP: setting up watch
+STEP: submitting the pod to kubernetes
+Jun 20 11:30:42.113: INFO: observed the pod list
+STEP: verifying the pod is in kubernetes
+STEP: verifying pod creation was observed
+STEP: deleting the pod gracefully
+STEP: verifying the kubelet observed the termination notice
+STEP: verifying pod deletion was observed
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:30:57.154: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-2177" for this suite.
+Jun 20 11:31:03.168: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:31:03.250: INFO: namespace pods-2177 deletion completed in 6.092534128s
+
+• [SLOW TEST:21.172 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:31:03.251: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test downward api env vars
+Jun 20 11:31:03.289: INFO: Waiting up to 5m0s for pod "downward-api-c20cfbe8-60ef-433d-921a-3f3cfd5fbf4f" in namespace "downward-api-5994" to be "success or failure"
+Jun 20 11:31:03.291: INFO: Pod "downward-api-c20cfbe8-60ef-433d-921a-3f3cfd5fbf4f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.511863ms
+Jun 20 11:31:05.295: INFO: Pod "downward-api-c20cfbe8-60ef-433d-921a-3f3cfd5fbf4f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.005585131s
+STEP: Saw pod success
+Jun 20 11:31:05.295: INFO: Pod "downward-api-c20cfbe8-60ef-433d-921a-3f3cfd5fbf4f" satisfied condition "success or failure"
+Jun 20 11:31:05.297: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod downward-api-c20cfbe8-60ef-433d-921a-3f3cfd5fbf4f container dapi-container: 
+STEP: delete the pod
+Jun 20 11:31:05.314: INFO: Waiting for pod downward-api-c20cfbe8-60ef-433d-921a-3f3cfd5fbf4f to disappear
+Jun 20 11:31:05.317: INFO: Pod downward-api-c20cfbe8-60ef-433d-921a-3f3cfd5fbf4f no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:31:05.317: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-5994" for this suite.
+Jun 20 11:31:11.333: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:31:11.408: INFO: namespace downward-api-5994 deletion completed in 6.084791455s
+
+• [SLOW TEST:8.156 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:32
+  should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
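+For context: the spec above exposes the pod's name, namespace, and IP to a container through downward API environment variables and checks the container's output. A minimal sketch (image is an assumption; any image with a shell works):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downward-api-env-example     # illustrative
+spec:
+  restartPolicy: Never
+  containers:
+  - name: dapi-container
+    image: busybox                   # assumed
+    command: ["sh", "-c", "env"]
+    env:
+    - name: POD_NAME
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.name
+    - name: POD_NAMESPACE
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.namespace
+    - name: POD_IP
+      valueFrom:
+        fieldRef:
+          fieldPath: status.podIP
+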
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:31:11.408: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename subpath
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:37
+STEP: Setting up data
+[It] should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating pod pod-subpath-test-configmap-sf67
+STEP: Creating a pod to test atomic-volume-subpath
+Jun 20 11:31:11.456: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-sf67" in namespace "subpath-1137" to be "success or failure"
+Jun 20 11:31:11.461: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Pending", Reason="", readiness=false. Elapsed: 4.814785ms
+Jun 20 11:31:13.466: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Running", Reason="", readiness=true. Elapsed: 2.009231052s
+Jun 20 11:31:15.470: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Running", Reason="", readiness=true. Elapsed: 4.013370673s
+Jun 20 11:31:17.473: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Running", Reason="", readiness=true. Elapsed: 6.016825948s
+Jun 20 11:31:19.476: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Running", Reason="", readiness=true. Elapsed: 8.01997302s
+Jun 20 11:31:21.480: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Running", Reason="", readiness=true. Elapsed: 10.023429171s
+Jun 20 11:31:23.483: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Running", Reason="", readiness=true. Elapsed: 12.026654252s
+Jun 20 11:31:25.487: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Running", Reason="", readiness=true. Elapsed: 14.030637242s
+Jun 20 11:31:27.490: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Running", Reason="", readiness=true. Elapsed: 16.033696701s
+Jun 20 11:31:29.494: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Running", Reason="", readiness=true. Elapsed: 18.037219159s
+Jun 20 11:31:31.497: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Running", Reason="", readiness=true. Elapsed: 20.040742019s
+Jun 20 11:31:33.503: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Running", Reason="", readiness=true. Elapsed: 22.046704809s
+Jun 20 11:31:35.507: INFO: Pod "pod-subpath-test-configmap-sf67": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.050346688s
+STEP: Saw pod success
+Jun 20 11:31:35.507: INFO: Pod "pod-subpath-test-configmap-sf67" satisfied condition "success or failure"
+Jun 20 11:31:35.510: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-subpath-test-configmap-sf67 container test-container-subpath-configmap-sf67: 
+STEP: delete the pod
+Jun 20 11:31:35.531: INFO: Waiting for pod pod-subpath-test-configmap-sf67 to disappear
+Jun 20 11:31:35.533: INFO: Pod pod-subpath-test-configmap-sf67 no longer exists
+STEP: Deleting pod pod-subpath-test-configmap-sf67
+Jun 20 11:31:35.533: INFO: Deleting pod "pod-subpath-test-configmap-sf67" in namespace "subpath-1137"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:31:35.536: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "subpath-1137" for this suite.
+Jun 20 11:31:41.548: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:31:41.621: INFO: namespace subpath-1137 deletion completed in 6.082207764s
+
+• [SLOW TEST:30.213 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:33
+    should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
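+For context: the spec above mounts a single ConfigMap key over an existing file in the container image via subPath and verifies the file serves the ConfigMap's content. A minimal sketch (image, paths, and names are assumptions):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-subpath-example          # illustrative
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test-container-subpath
+    image: busybox                   # assumed
+    command: ["sh", "-c", "cat /etc/resolv.conf"]
+    volumeMounts:
+    - name: config
+      mountPath: /etc/resolv.conf    # existing file, replaced by the mount
+      subPath: data-1                # single key selected from the ConfigMap
+  volumes:
+  - name: config
+    configMap:
+      name: subpath-configmap        # illustrative; must exist in the namespace
+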
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Guestbook application 
+  should create and stop a working application  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:31:41.621: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[It] should create and stop a working application  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: creating all guestbook components
+Jun 20 11:31:41.652: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: redis-slave
+  labels:
+    app: redis
+    role: slave
+    tier: backend
+spec:
+  ports:
+  - port: 6379
+  selector:
+    app: redis
+    role: slave
+    tier: backend
+
+Jun 20 11:31:41.652: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-9880'
+Jun 20 11:31:41.891: INFO: stderr: ""
+Jun 20 11:31:41.891: INFO: stdout: "service/redis-slave created\n"
+Jun 20 11:31:41.891: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: redis-master
+  labels:
+    app: redis
+    role: master
+    tier: backend
+spec:
+  ports:
+  - port: 6379
+    targetPort: 6379
+  selector:
+    app: redis
+    role: master
+    tier: backend
+
+Jun 20 11:31:41.891: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-9880'
+Jun 20 11:31:42.127: INFO: stderr: ""
+Jun 20 11:31:42.127: INFO: stdout: "service/redis-master created\n"
+Jun 20 11:31:42.127: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: frontend
+  labels:
+    app: guestbook
+    tier: frontend
+spec:
+  # if your cluster supports it, uncomment the following to automatically create
+  # an external load-balanced IP for the frontend service.
+  # type: LoadBalancer
+  ports:
+  - port: 80
+  selector:
+    app: guestbook
+    tier: frontend
+
+Jun 20 11:31:42.127: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-9880'
+Jun 20 11:31:42.366: INFO: stderr: ""
+Jun 20 11:31:42.366: INFO: stdout: "service/frontend created\n"
+Jun 20 11:31:42.366: INFO: apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: frontend
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: guestbook
+      tier: frontend
+  template:
+    metadata:
+      labels:
+        app: guestbook
+        tier: frontend
+    spec:
+      containers:
+      - name: php-redis
+        image: gcr.io/google-samples/gb-frontend:v6
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access environment variables to find service host
+          # info, comment out the 'value: dns' line above, and uncomment the
+          # line below:
+          # value: env
+        ports:
+        - containerPort: 80
+
+Jun 20 11:31:42.366: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-9880'
+Jun 20 11:31:42.615: INFO: stderr: ""
+Jun 20 11:31:42.615: INFO: stdout: "deployment.apps/frontend created\n"
+Jun 20 11:31:42.615: INFO: apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: redis-master
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: redis
+      role: master
+      tier: backend
+  template:
+    metadata:
+      labels:
+        app: redis
+        role: master
+        tier: backend
+    spec:
+      containers:
+      - name: master
+        image: gcr.io/kubernetes-e2e-test-images/redis:1.0
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        ports:
+        - containerPort: 6379
+
+Jun 20 11:31:42.615: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-9880'
+Jun 20 11:31:42.829: INFO: stderr: ""
+Jun 20 11:31:42.829: INFO: stdout: "deployment.apps/redis-master created\n"
+Jun 20 11:31:42.829: INFO: apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: redis-slave
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: redis
+      role: slave
+      tier: backend
+  template:
+    metadata:
+      labels:
+        app: redis
+        role: slave
+        tier: backend
+    spec:
+      containers:
+      - name: slave
+        image: gcr.io/google-samples/gb-redisslave:v3
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access an environment variable to find the master
+          # service's host, comment out the 'value: dns' line above, and
+          # uncomment the line below:
+          # value: env
+        ports:
+        - containerPort: 6379
+
+Jun 20 11:31:42.829: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 create -f - --namespace=kubectl-9880'
+Jun 20 11:31:43.055: INFO: stderr: ""
+Jun 20 11:31:43.055: INFO: stdout: "deployment.apps/redis-slave created\n"
+STEP: validating guestbook app
+Jun 20 11:31:43.055: INFO: Waiting for all frontend pods to be Running.
+Jun 20 11:31:58.106: INFO: Waiting for frontend to serve content.
+Jun 20 11:31:58.124: INFO: Trying to add a new entry to the guestbook.
+Jun 20 11:31:58.158: INFO: Verifying that added entry can be retrieved.
+STEP: using delete to clean up resources
+Jun 20 11:31:58.179: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete --grace-period=0 --force -f - --namespace=kubectl-9880'
+Jun 20 11:31:58.266: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 20 11:31:58.266: INFO: stdout: "service \"redis-slave\" force deleted\n"
+STEP: using delete to clean up resources
+Jun 20 11:31:58.266: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete --grace-period=0 --force -f - --namespace=kubectl-9880'
+Jun 20 11:31:58.386: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 20 11:31:58.386: INFO: stdout: "service \"redis-master\" force deleted\n"
+STEP: using delete to clean up resources
+Jun 20 11:31:58.386: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete --grace-period=0 --force -f - --namespace=kubectl-9880'
+Jun 20 11:31:58.494: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 20 11:31:58.494: INFO: stdout: "service \"frontend\" force deleted\n"
+STEP: using delete to clean up resources
+Jun 20 11:31:58.494: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete --grace-period=0 --force -f - --namespace=kubectl-9880'
+Jun 20 11:31:58.578: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 20 11:31:58.578: INFO: stdout: "deployment.apps \"frontend\" force deleted\n"
+STEP: using delete to clean up resources
+Jun 20 11:31:58.578: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete --grace-period=0 --force -f - --namespace=kubectl-9880'
+Jun 20 11:31:58.645: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 20 11:31:58.645: INFO: stdout: "deployment.apps \"redis-master\" force deleted\n"
+STEP: using delete to clean up resources
+Jun 20 11:31:58.646: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete --grace-period=0 --force -f - --namespace=kubectl-9880'
+Jun 20 11:31:58.711: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun 20 11:31:58.711: INFO: stdout: "deployment.apps \"redis-slave\" force deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:31:58.712: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-9880" for this suite.
+Jun 20 11:32:38.725: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:32:38.795: INFO: namespace kubectl-9880 deletion completed in 40.079754503s
+
+• [SLOW TEST:57.173 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Guestbook application
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should create and stop a working application  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run pod 
+  should create a pod from an image when restart is Never  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:32:38.795: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:221
+[BeforeEach] [k8s.io] Kubectl run pod
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1686
+[It] should create a pod from an image when restart is Never  [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun 20 11:32:38.831: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 run e2e-test-nginx-pod --restart=Never --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=kubectl-7594'
+Jun 20 11:32:38.908: INFO: stderr: ""
+Jun 20 11:32:38.908: INFO: stdout: "pod/e2e-test-nginx-pod created\n"
+STEP: verifying the pod e2e-test-nginx-pod was created
+[AfterEach] [k8s.io] Kubectl run pod
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1691
+Jun 20 11:32:38.911: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-878248618 delete pods e2e-test-nginx-pod --namespace=kubectl-7594'
+Jun 20 11:32:47.145: INFO: stderr: ""
+Jun 20 11:32:47.145: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:32:47.146: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-7594" for this suite.
+Jun 20 11:32:53.160: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:32:53.237: INFO: namespace kubectl-7594 deletion completed in 6.087380453s
+
+• [SLOW TEST:14.442 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl run pod
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+    should create a pod from an image when restart is Never  [Conformance]
+    /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:32:53.237: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename containers
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a pod to test override arguments
+Jun 20 11:32:53.277: INFO: Waiting up to 5m0s for pod "client-containers-03248c70-7c31-4945-a42e-437378ac91ad" in namespace "containers-9577" to be "success or failure"
+Jun 20 11:32:53.280: INFO: Pod "client-containers-03248c70-7c31-4945-a42e-437378ac91ad": Phase="Pending", Reason="", readiness=false. Elapsed: 3.501481ms
+Jun 20 11:32:55.283: INFO: Pod "client-containers-03248c70-7c31-4945-a42e-437378ac91ad": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006713701s
+STEP: Saw pod success
+Jun 20 11:32:55.283: INFO: Pod "client-containers-03248c70-7c31-4945-a42e-437378ac91ad" satisfied condition "success or failure"
+Jun 20 11:32:55.286: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod client-containers-03248c70-7c31-4945-a42e-437378ac91ad container test-container: 
+STEP: delete the pod
+Jun 20 11:32:55.305: INFO: Waiting for pod client-containers-03248c70-7c31-4945-a42e-437378ac91ad to disappear
+Jun 20 11:32:55.308: INFO: Pod client-containers-03248c70-7c31-4945-a42e-437378ac91ad no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:32:55.308: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "containers-9577" for this suite.
+Jun 20 11:33:01.320: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:33:01.397: INFO: namespace containers-9577 deletion completed in 6.086369231s
+
+• [SLOW TEST:8.160 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+  should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
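+For context: the spec above verifies that args in the container spec override the image's default CMD. A minimal sketch (image is an assumption; busybox has no ENTRYPOINT, so the args become the executed command):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: client-containers-example    # illustrative
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test-container
+    image: docker.io/library/busybox:1.29
+    args: ["echo", "override", "arguments"]   # replaces the image CMD ("sh")
+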
+SSSS
+------------------------------
+[sig-api-machinery] Namespaces [Serial] 
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:33:01.397: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename namespaces
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating a test namespace
+STEP: Waiting for a default service account to be provisioned in namespace
+STEP: Creating a pod in the namespace
+STEP: Waiting for the pod to have running status
+STEP: Deleting the namespace
+STEP: Waiting for the namespace to be removed.
+STEP: Recreating the namespace
+STEP: Verifying there are no pods in the namespace
+[AfterEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:33:33.513: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "namespaces-4635" for this suite.
+Jun 20 11:33:39.529: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:33:39.598: INFO: namespace namespaces-4635 deletion completed in 6.080244139s
+STEP: Destroying namespace "nsdeletetest-5145" for this suite.
+Jun 20 11:33:39.600: INFO: Namespace nsdeletetest-5145 was already deleted
+STEP: Destroying namespace "nsdeletetest-7611" for this suite.
+Jun 20 11:33:45.610: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:33:45.676: INFO: namespace nsdeletetest-7611 deletion completed in 6.076882022s
+
+• [SLOW TEST:44.279 seconds]
+[sig-api-machinery] Namespaces [Serial]
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Creating a kubernetes client
+Jun 20 11:33:45.677: INFO: >>> kubeConfig: /tmp/kubeconfig-878248618
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+STEP: Creating configMap with name projected-configmap-test-volume-map-d113e969-4267-4683-817d-c090b42fe189
+STEP: Creating a pod to test consume configMaps
+Jun 20 11:33:45.723: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-836d2d25-3484-49b2-9ae9-75fb9fe11c6c" in namespace "projected-2922" to be "success or failure"
+Jun 20 11:33:45.730: INFO: Pod "pod-projected-configmaps-836d2d25-3484-49b2-9ae9-75fb9fe11c6c": Phase="Pending", Reason="", readiness=false. Elapsed: 6.860156ms
+Jun 20 11:33:47.733: INFO: Pod "pod-projected-configmaps-836d2d25-3484-49b2-9ae9-75fb9fe11c6c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.010180172s
+Jun 20 11:33:49.736: INFO: Pod "pod-projected-configmaps-836d2d25-3484-49b2-9ae9-75fb9fe11c6c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.0135636s
+STEP: Saw pod success
+Jun 20 11:33:49.736: INFO: Pod "pod-projected-configmaps-836d2d25-3484-49b2-9ae9-75fb9fe11c6c" satisfied condition "success or failure"
+Jun 20 11:33:49.739: INFO: Trying to get logs from node ip-10-100-10-111.eu-west-1.compute.internal pod pod-projected-configmaps-836d2d25-3484-49b2-9ae9-75fb9fe11c6c container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun 20 11:33:49.755: INFO: Waiting for pod pod-projected-configmaps-836d2d25-3484-49b2-9ae9-75fb9fe11c6c to disappear
+Jun 20 11:33:49.758: INFO: Pod pod-projected-configmaps-836d2d25-3484-49b2-9ae9-75fb9fe11c6c no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:151
+Jun 20 11:33:49.758: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-2922" for this suite.
+Jun 20 11:33:55.770: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 20 11:33:55.836: INFO: namespace projected-2922 deletion completed in 6.076074475s
+
+• [SLOW TEST:10.160 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.15.0-rc.1.19+e8462b5b5dc258/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:697
+------------------------------
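+For context: the spec above consumes a ConfigMap through a projected volume with a key-to-path mapping, running as a non-root user. A minimal sketch (image, args, and names are assumptions):
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-projected-configmaps-example   # illustrative
+spec:
+  securityContext:
+    runAsUser: 1000                  # non-root
+  containers:
+  - name: projected-configmap-volume-test
+    image: gcr.io/kubernetes-e2e-test-images/mounttest:1.0   # assumed helper image
+    args: ["--file_content=/etc/projected/path/to/data-2"]
+    volumeMounts:
+    - name: projected-config
+      mountPath: /etc/projected
+  volumes:
+  - name: projected-config
+    projected:
+      sources:
+      - configMap:
+          name: projected-configmap-test   # illustrative
+          items:
+          - key: data-2
+            path: path/to/data-2     # key remapped to a nested path
+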
+SJun 20 11:33:55.837: INFO: Running AfterSuite actions on all nodes
+Jun 20 11:33:55.837: INFO: Running AfterSuite actions on node 1
+Jun 20 11:33:55.837: INFO: Skipping dumping logs from cluster
+
+Ran 215 of 4411 Specs in 5652.332 seconds
+SUCCESS! -- 215 Passed | 0 Failed | 0 Pending | 4196 Skipped
+PASS
+
+Ginkgo ran 1 suite in 1h34m14.442340112s
+Test Suite Passed
diff --git a/v1.15/kubernetes-fury-aws/junit_01.xml b/v1.15/kubernetes-fury-aws/junit_01.xml
new file mode 100644
index 0000000000..0302dc6c6d
--- /dev/null
+++ b/v1.15/kubernetes-fury-aws/junit_01.xml
@@ -0,0 +1,12806 @@
+[12,806 lines of JUnit XML test results; all XML markup was stripped in extraction, leaving no recoverable content]
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
\ No newline at end of file