diff --git a/v1.14/kublr/PRODUCT.yaml b/v1.14/kublr/PRODUCT.yaml
new file mode 100644
index 0000000000..697d666322
--- /dev/null
+++ b/v1.14/kublr/PRODUCT.yaml
@@ -0,0 +1,8 @@
+vendor: Kublr
+name: Kublr
+version: v1.14.0
+website_url: https://kublr.com
+documentation_url: https://docs.kublr.com
+product_logo_url: https://assets.kublr.com/kublr-logo.svg
+type: distribution
+description: Kubernetes for the enterprise
diff --git a/v1.14/kublr/README.md b/v1.14/kublr/README.md
new file mode 100644
index 0000000000..771d66fd12
--- /dev/null
+++ b/v1.14/kublr/README.md
@@ -0,0 +1,30 @@
+# To reproduce
+
+## Set up the cluster
+
+1. Download and install the Kublr Control Plane ([documentation](https://kublr.com/deploy/)).
+2. Create a new Kubernetes cluster using the Kublr Control Plane.
+
+When the cluster is up and running:
+
+1. Download the kubeconfig file.
+2. Set the `KUBECONFIG` environment variable: `export KUBECONFIG=$(pwd)/kubeconfig`.
+
+## Run the conformance test
+
+Download a Sonobuoy [binary release](https://github.com/heptio/sonobuoy/releases) of the CLI, or build it yourself with `go get`; then deploy Sonobuoy to the cluster and start the conformance run:
+```bash
+$ go get -u -v github.com/heptio/sonobuoy
+
+$ sonobuoy run
+```
+
+Once `$ sonobuoy status` reports that the run has completed, you can download the results:
+```bash
+$ outfile=$(sonobuoy retrieve)
+```
+
+Untar the tarball, then add `plugins/e2e/results/{e2e.log,junit_01.xml}` to the submission:
+```bash
+$ mkdir ./results; tar xzf $outfile -C ./results
+```
diff --git a/v1.14/kublr/e2e.log b/v1.14/kublr/e2e.log
new file mode 100644
index 0000000000..92c93e60bc
--- /dev/null
+++ b/v1.14/kublr/e2e.log
@@ -0,0 +1,10749 @@
+I0606 12:54:52.257158 14 test_context.go:405] Using a temporary kubeconfig file from in-cluster config : /tmp/kubeconfig-041581163
+I0606 12:54:52.257258 14 e2e.go:240] Starting e2e run "45bf6e69-885a-11e9-b613-8a9bc7c14a19" on Ginkgo node 1
+Running Suite: Kubernetes e2e suite
+===================================
+Random Seed: 1559825690 - Will randomize all specs
+Will run 204 of 3585 specs
+
+Jun 6 12:54:52.438: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun 6 12:54:52.440: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable
+Jun 6 12:54:52.450: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
+Jun 6 12:54:52.492: INFO: 32 / 32 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
+Jun 6 12:54:52.492: INFO: expected 11 pod replicas in namespace 'kube-system', 11 are Running and Ready. 
+Jun 6 12:54:52.492: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start +Jun 6 12:54:52.502: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'canal' (0 seconds elapsed) +Jun 6 12:54:52.502: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'kublr-logging-fluentd-es-v2.0.2' (0 seconds elapsed) +Jun 6 12:54:52.502: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'node-local-dns' (0 seconds elapsed) +Jun 6 12:54:52.502: INFO: e2e test version: v1.14.2 +Jun 6 12:54:52.503: INFO: kube-apiserver version: v1.14.2 +S +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl label + should update the label on a resource [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 12:54:52.503: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename kubectl +Jun 6 12:54:52.538: INFO: Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled. +Jun 6 12:54:52.549: INFO: Found ClusterRoles; assuming RBAC is enabled. +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-5626 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Kubectl label + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1108 +STEP: creating the pod +Jun 6 12:54:52.665: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-5626' +Jun 6 12:54:53.140: INFO: stderr: "" +Jun 6 12:54:53.140: INFO: stdout: "pod/pause created\n" +Jun 6 12:54:53.140: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause] +Jun 6 12:54:53.141: INFO: Waiting up to 5m0s for pod "pause" in namespace "kubectl-5626" to be "running and ready" +Jun 6 12:54:53.145: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 4.746282ms +Jun 6 12:54:55.148: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 2.00710353s +Jun 6 12:54:55.148: INFO: Pod "pause" satisfied condition "running and ready" +Jun 6 12:54:55.148: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [pause] +[It] should update the label on a resource [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: adding the label testing-label with value testing-label-value to a pod +Jun 6 12:54:55.148: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 label pods pause testing-label=testing-label-value --namespace=kubectl-5626' +Jun 6 12:54:55.216: INFO: stderr: "" +Jun 6 12:54:55.216: INFO: stdout: "pod/pause labeled\n" +STEP: verifying the pod has the label testing-label with the value testing-label-value +Jun 6 12:54:55.217: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pod pause -L testing-label --namespace=kubectl-5626' +Jun 6 12:54:55.283: INFO: stderr: "" +Jun 6 12:54:55.283: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s testing-label-value\n" +STEP: removing the label testing-label of a pod +Jun 6 12:54:55.283: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 label pods pause testing-label- --namespace=kubectl-5626' +Jun 6 12:54:55.350: INFO: stderr: "" +Jun 6 12:54:55.350: INFO: stdout: "pod/pause labeled\n" +STEP: verifying the pod doesn't have the label testing-label +Jun 6 12:54:55.350: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pod pause -L testing-label --namespace=kubectl-5626' +Jun 6 12:54:55.412: INFO: stderr: "" +Jun 6 12:54:55.412: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s \n" +[AfterEach] [k8s.io] Kubectl label + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1115 +STEP: using delete to clean up resources +Jun 6 12:54:55.412: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete --grace-period=0 --force -f - --namespace=kubectl-5626' +Jun 6 12:54:55.479: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 6 12:54:55.479: INFO: stdout: "pod \"pause\" force deleted\n" +Jun 6 12:54:55.479: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get rc,svc -l name=pause --no-headers --namespace=kubectl-5626' +Jun 6 12:54:55.545: INFO: stderr: "No resources found.\n" +Jun 6 12:54:55.545: INFO: stdout: "" +Jun 6 12:54:55.545: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -l name=pause --namespace=kubectl-5626 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Jun 6 12:54:55.605: INFO: stderr: "" +Jun 6 12:54:55.605: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 12:54:55.605: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-5626" for this suite. 
+Jun 6 12:55:01.615: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 12:55:01.710: INFO: namespace kubectl-5626 deletion completed in 6.102073826s + +• [SLOW TEST:9.207 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl label + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should update the label on a resource [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] InitContainer [NodeConformance] + should invoke init containers on a RestartAlways pod [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 12:55:01.711: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename init-container +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in init-container-5788 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43 +[It] should invoke init containers on a RestartAlways pod [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating the pod +Jun 6 12:55:01.840: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 12:55:07.214: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-5788" for this suite. 
+Jun 6 12:55:29.224: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 12:55:29.340: INFO: namespace init-container-5788 deletion completed in 22.123234497s + +• [SLOW TEST:27.630 seconds] +[k8s.io] InitContainer [NodeConformance] +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should invoke init containers on a RestartAlways pod [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 12:55:29.340: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename statefulset +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in statefulset-2479 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace statefulset-2479 +[It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Initializing watcher for selector baz=blah,foo=bar +STEP: Creating stateful set ss in namespace statefulset-2479 +STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-2479 +Jun 6 12:55:29.505: INFO: Found 0 stateful pods, waiting for 1 +Jun 6 12:55:39.507: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod +Jun 6 12:55:39.509: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-2479 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 6 12:55:39.766: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n" +Jun 6 12:55:39.766: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 6 12:55:39.766: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 6 12:55:39.769: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true +Jun 6 12:55:49.771: INFO: Waiting for pod ss-0 to enter Running - 
Ready=false, currently Running - Ready=false +Jun 6 12:55:49.771: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 6 12:55:49.781: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999123s +Jun 6 12:55:50.783: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.997059664s +Jun 6 12:55:51.819: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.994716153s +Jun 6 12:55:52.878: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.958755814s +Jun 6 12:55:53.881: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.899690444s +Jun 6 12:55:54.884: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.896895466s +Jun 6 12:55:55.886: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.894307294s +Jun 6 12:55:56.889: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.891525592s +Jun 6 12:55:57.892: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.888956826s +Jun 6 12:55:58.894: INFO: Verifying statefulset ss doesn't scale past 1 for another 886.389029ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-2479 +Jun 6 12:55:59.897: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-2479 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 12:56:00.154: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n" +Jun 6 12:56:00.154: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 6 12:56:00.154: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 6 12:56:00.156: INFO: Found 1 stateful pods, waiting for 3 +Jun 6 12:56:10.220: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +Jun 6 12:56:10.220: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +Jun 6 12:56:10.220: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Verifying that stateful set ss was scaled up in order +STEP: Scale down will halt with unhealthy stateful pod +Jun 6 12:56:10.224: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-2479 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 6 12:56:10.426: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n" +Jun 6 12:56:10.426: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 6 12:56:10.426: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 6 12:56:10.426: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-2479 ss-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 6 12:56:10.720: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n" +Jun 6 12:56:10.720: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 6 12:56:10.720: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 6 12:56:10.720: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-2479 ss-2 -- /bin/sh -x -c mv -v 
/usr/share/nginx/html/index.html /tmp/ || true' +Jun 6 12:56:10.978: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n" +Jun 6 12:56:10.978: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 6 12:56:10.978: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 6 12:56:10.978: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 6 12:56:10.980: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 +Jun 6 12:56:20.984: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Jun 6 12:56:20.984: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false +Jun 6 12:56:20.984: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false +Jun 6 12:56:20.994: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.999999097s +Jun 6 12:56:21.997: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.996112763s +Jun 6 12:56:23.000: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.993265406s +Jun 6 12:56:24.003: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.990462473s +Jun 6 12:56:25.005: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.987439222s +Jun 6 12:56:26.008: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.984724992s +Jun 6 12:56:27.011: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.98175808s +Jun 6 12:56:28.024: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.979062649s +Jun 6 12:56:29.027: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.96646206s +Jun 6 12:56:30.032: INFO: Verifying statefulset ss doesn't scale past 3 for another 963.231308ms +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-2479 +Jun 6 12:56:31.035: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-2479 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 12:56:31.300: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n" +Jun 6 12:56:31.300: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 6 12:56:31.300: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 6 12:56:31.300: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-2479 ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 12:56:31.561: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n" +Jun 6 12:56:31.561: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 6 12:56:31.561: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 6 12:56:31.561: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-2479 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 12:56:31.794: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n" +Jun 6 12:56:31.794: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 6 12:56:31.794: INFO: stdout of mv -v /tmp/index.html 
/usr/share/nginx/html/ || true on ss-2: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 6 12:56:31.794: INFO: Scaling statefulset ss to 0 +STEP: Verifying that stateful set ss was scaled down in reverse order +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +Jun 6 12:57:01.807: INFO: Deleting all statefulset in ns statefulset-2479 +Jun 6 12:57:01.808: INFO: Scaling statefulset ss to 0 +Jun 6 12:57:01.814: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 6 12:57:01.816: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 12:57:01.823: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-2479" for this suite. +Jun 6 12:57:07.845: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 12:57:07.938: INFO: namespace statefulset-2479 deletion completed in 6.102708064s + +• [SLOW TEST:98.598 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run deployment + should create a deployment from an image [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 12:57:07.938: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-4371 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Kubectl run deployment + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1455 +[It] should create a deployment from an image [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 6 12:57:08.065: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-041581163 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --generator=deployment/v1beta1 --namespace=kubectl-4371' +Jun 6 12:57:08.142: INFO: stderr: "kubectl run --generator=deployment/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +Jun 6 12:57:08.142: INFO: stdout: "deployment.extensions/e2e-test-nginx-deployment created\n" +STEP: verifying the deployment e2e-test-nginx-deployment was created +STEP: verifying the pod controlled by deployment e2e-test-nginx-deployment was created +[AfterEach] [k8s.io] Kubectl run deployment + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1460 +Jun 6 12:57:10.165: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete deployment e2e-test-nginx-deployment --namespace=kubectl-4371' +Jun 6 12:57:10.235: INFO: stderr: "" +Jun 6 12:57:10.235: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 12:57:10.235: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-4371" for this suite. +Jun 6 12:59:12.247: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 12:59:12.460: INFO: namespace kubectl-4371 deletion completed in 2m2.222267166s + +• [SLOW TEST:124.522 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl run deployment + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should create a deployment from an image [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 12:59:12.460: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-4436 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test emptydir 0777 on node default medium +Jun 6 12:59:12.590: INFO: Waiting up to 5m0s for pod 
"pod-e1a85e70-885a-11e9-b613-8a9bc7c14a19" in namespace "emptydir-4436" to be "success or failure" +Jun 6 12:59:12.592: INFO: Pod "pod-e1a85e70-885a-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.294129ms +Jun 6 12:59:14.594: INFO: Pod "pod-e1a85e70-885a-11e9-b613-8a9bc7c14a19": Phase="Running", Reason="", readiness=true. Elapsed: 2.004540056s +Jun 6 12:59:16.598: INFO: Pod "pod-e1a85e70-885a-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.008320359s +STEP: Saw pod success +Jun 6 12:59:16.598: INFO: Pod "pod-e1a85e70-885a-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 12:59:16.600: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-e1a85e70-885a-11e9-b613-8a9bc7c14a19 container test-container: +STEP: delete the pod +Jun 6 12:59:16.615: INFO: Waiting for pod pod-e1a85e70-885a-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 12:59:16.616: INFO: Pod pod-e1a85e70-885a-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 12:59:16.616: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-4436" for this suite. +Jun 6 12:59:22.624: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 12:59:22.761: INFO: namespace emptydir-4436 deletion completed in 6.143051708s + +• [SLOW TEST:10.301 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41 + should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSS +------------------------------ +[sig-storage] Projected secret + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 12:59:22.762: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-1434 +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating secret with name s-test-opt-del-e7cc7540-885a-11e9-b613-8a9bc7c14a19 +STEP: Creating secret with name s-test-opt-upd-e7cc7582-885a-11e9-b613-8a9bc7c14a19 +STEP: Creating the pod +STEP: Deleting secret s-test-opt-del-e7cc7540-885a-11e9-b613-8a9bc7c14a19 +STEP: Updating secret s-test-opt-upd-e7cc7582-885a-11e9-b613-8a9bc7c14a19 +STEP: Creating secret with name 
s-test-opt-create-e7cc75a0-885a-11e9-b613-8a9bc7c14a19 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 12:59:30.967: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-1434" for this suite. +Jun 6 12:59:52.976: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 12:59:53.042: INFO: namespace projected-1434 deletion completed in 22.073810089s + +• [SLOW TEST:30.281 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Update Demo + should do a rolling update of a replication controller [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 12:59:53.042: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-9131 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Update Demo + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:265 +[It] should do a rolling update of a replication controller [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating the initial replication controller +Jun 6 12:59:53.170: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-9131' +Jun 6 12:59:53.323: INFO: stderr: "" +Jun 6 12:59:53.323: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Jun 6 12:59:53.324: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-9131' +Jun 6 12:59:53.409: INFO: stderr: "" +Jun 6 12:59:53.409: INFO: stdout: "update-demo-nautilus-592kk update-demo-nautilus-xq86c " +Jun 6 12:59:53.409: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-592kk -o template --template={{if (exists . 
"status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-9131' +Jun 6 12:59:53.483: INFO: stderr: "" +Jun 6 12:59:53.483: INFO: stdout: "" +Jun 6 12:59:53.483: INFO: update-demo-nautilus-592kk is created but not running +Jun 6 12:59:58.483: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-9131' +Jun 6 12:59:58.546: INFO: stderr: "" +Jun 6 12:59:58.546: INFO: stdout: "update-demo-nautilus-592kk update-demo-nautilus-xq86c " +Jun 6 12:59:58.546: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-592kk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-9131' +Jun 6 12:59:58.611: INFO: stderr: "" +Jun 6 12:59:58.611: INFO: stdout: "true" +Jun 6 12:59:58.611: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-592kk -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-9131' +Jun 6 12:59:58.669: INFO: stderr: "" +Jun 6 12:59:58.669: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 6 12:59:58.669: INFO: validating pod update-demo-nautilus-592kk +Jun 6 12:59:58.674: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 6 12:59:58.674: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 6 12:59:58.674: INFO: update-demo-nautilus-592kk is verified up and running +Jun 6 12:59:58.674: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-xq86c -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-9131' +Jun 6 12:59:58.734: INFO: stderr: "" +Jun 6 12:59:58.734: INFO: stdout: "true" +Jun 6 12:59:58.734: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-xq86c -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-9131' +Jun 6 12:59:58.792: INFO: stderr: "" +Jun 6 12:59:58.792: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 6 12:59:58.792: INFO: validating pod update-demo-nautilus-xq86c +Jun 6 12:59:58.796: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 6 12:59:58.796: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . 
+Jun 6 12:59:58.796: INFO: update-demo-nautilus-xq86c is verified up and running +STEP: rolling-update to new replication controller +Jun 6 12:59:58.798: INFO: scanned /root for discovery docs: +Jun 6 12:59:58.798: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 rolling-update update-demo-nautilus --update-period=1s -f - --namespace=kubectl-9131' +Jun 6 13:00:20.151: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n" +Jun 6 13:00:20.152: INFO: stdout: "Created update-demo-kitten\nScaling up update-demo-kitten from 0 to 2, scaling down update-demo-nautilus from 2 to 0 (keep 2 pods available, don't exceed 3 pods)\nScaling update-demo-kitten up to 1\nScaling update-demo-nautilus down to 1\nScaling update-demo-kitten up to 2\nScaling update-demo-nautilus down to 0\nUpdate succeeded. Deleting old controller: update-demo-nautilus\nRenaming update-demo-kitten to update-demo-nautilus\nreplicationcontroller/update-demo-nautilus rolling updated\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Jun 6 13:00:20.152: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-9131' +Jun 6 13:00:20.222: INFO: stderr: "" +Jun 6 13:00:20.223: INFO: stdout: "update-demo-kitten-fsgvp update-demo-kitten-gprgf " +Jun 6 13:00:20.223: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-kitten-fsgvp -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-9131' +Jun 6 13:00:20.282: INFO: stderr: "" +Jun 6 13:00:20.282: INFO: stdout: "true" +Jun 6 13:00:20.282: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-kitten-fsgvp -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-9131' +Jun 6 13:00:20.354: INFO: stderr: "" +Jun 6 13:00:20.354: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/kitten:1.0" +Jun 6 13:00:20.354: INFO: validating pod update-demo-kitten-fsgvp +Jun 6 13:00:20.357: INFO: got data: { + "image": "kitten.jpg" +} + +Jun 6 13:00:20.357: INFO: Unmarshalled json jpg/img => {kitten.jpg} , expecting kitten.jpg . +Jun 6 13:00:20.357: INFO: update-demo-kitten-fsgvp is verified up and running +Jun 6 13:00:20.357: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-kitten-gprgf -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-9131' +Jun 6 13:00:20.418: INFO: stderr: "" +Jun 6 13:00:20.418: INFO: stdout: "true" +Jun 6 13:00:20.418: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-kitten-gprgf -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-9131' +Jun 6 13:00:20.483: INFO: stderr: "" +Jun 6 13:00:20.483: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/kitten:1.0" +Jun 6 13:00:20.483: INFO: validating pod update-demo-kitten-gprgf +Jun 6 13:00:20.488: INFO: got data: { + "image": "kitten.jpg" +} + +Jun 6 13:00:20.488: INFO: Unmarshalled json jpg/img => {kitten.jpg} , expecting kitten.jpg . +Jun 6 13:00:20.488: INFO: update-demo-kitten-gprgf is verified up and running +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:00:20.488: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-9131" for this suite. +Jun 6 13:00:42.497: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:00:42.560: INFO: namespace kubectl-9131 deletion completed in 22.069505464s + +• [SLOW TEST:49.517 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Update Demo + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should do a rolling update of a replication controller [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSS +------------------------------ +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute poststart exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:00:42.560: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-lifecycle-hook-373 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61 +STEP: create the container to handle the HTTPGet hook request. 
+[It] should execute poststart exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: create the pod with lifecycle hook +STEP: check poststart hook +STEP: delete the pod with lifecycle hook +Jun 6 13:00:50.753: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:00:50.755: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:00:52.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:00:52.757: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:00:54.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:00:54.757: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:00:56.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:00:56.757: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:00:58.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:00:58.757: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:01:00.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:01:00.757: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:01:02.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:01:02.758: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:01:04.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:01:04.758: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:01:06.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:01:06.758: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:01:08.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:01:08.758: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:01:10.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:01:10.758: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:01:12.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:01:12.758: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:01:14.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:01:14.758: INFO: Pod pod-with-poststart-exec-hook still exists +Jun 6 13:01:16.755: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Jun 6 13:01:16.757: INFO: Pod pod-with-poststart-exec-hook no longer exists +[AfterEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:01:16.757: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-373" for this suite. 
+Jun 6 13:01:38.769: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:01:38.842: INFO: namespace container-lifecycle-hook-373 deletion completed in 22.082281332s + +• [SLOW TEST:56.282 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + when create a pod with lifecycle hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40 + should execute poststart exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should update labels on modification [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:01:38.842: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-3958 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should update labels on modification [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating the pod +Jun 6 13:01:41.522: INFO: Successfully updated pod "labelsupdate38e8b77e-885b-11e9-b613-8a9bc7c14a19" +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:01:43.533: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-3958" for this suite. 
+Jun 6 13:02:05.543: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:02:05.661: INFO: namespace downward-api-3958 deletion completed in 22.125021249s + +• [SLOW TEST:26.818 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should update labels on modification [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +S +------------------------------ +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:02:05.661: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-lifecycle-hook-1580 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61 +STEP: create the container to handle the HTTPGet hook request. +[It] should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: create the pod with lifecycle hook +STEP: delete the pod with lifecycle hook +Jun 6 13:02:09.834: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Jun 6 13:02:09.836: INFO: Pod pod-with-prestop-http-hook still exists +Jun 6 13:02:11.836: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Jun 6 13:02:11.921: INFO: Pod pod-with-prestop-http-hook still exists +Jun 6 13:02:13.836: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Jun 6 13:02:13.838: INFO: Pod pod-with-prestop-http-hook still exists +Jun 6 13:02:15.836: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Jun 6 13:02:15.838: INFO: Pod pod-with-prestop-http-hook no longer exists +STEP: check prestop hook +[AfterEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:02:15.844: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-1580" for this suite. 
+Jun 6 13:02:37.861: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:02:37.968: INFO: namespace container-lifecycle-hook-1580 deletion completed in 22.114787801s + +• [SLOW TEST:32.307 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + when create a pod with lifecycle hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40 + should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] EmptyDir wrapper volumes + should not cause race condition when used for configmaps [Serial] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] EmptyDir wrapper volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:02:37.968: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename emptydir-wrapper +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-wrapper-2452 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not cause race condition when used for configmaps [Serial] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating 50 configmaps +STEP: Creating RC which spawns configmap-volume pods +Jun 6 13:02:38.357: INFO: Pod name wrapped-volume-race-5c35f367-885b-11e9-b613-8a9bc7c14a19: Found 4 pods out of 5 +Jun 6 13:02:43.361: INFO: Pod name wrapped-volume-race-5c35f367-885b-11e9-b613-8a9bc7c14a19: Found 5 pods out of 5 +STEP: Ensuring each pod is running +STEP: deleting ReplicationController wrapped-volume-race-5c35f367-885b-11e9-b613-8a9bc7c14a19 in namespace emptydir-wrapper-2452, will wait for the garbage collector to delete the pods +Jun 6 13:02:53.435: INFO: Deleting ReplicationController wrapped-volume-race-5c35f367-885b-11e9-b613-8a9bc7c14a19 took: 5.394381ms +Jun 6 13:02:53.835: INFO: Terminating ReplicationController wrapped-volume-race-5c35f367-885b-11e9-b613-8a9bc7c14a19 pods took: 400.202368ms +STEP: Creating RC which spawns configmap-volume pods +Jun 6 13:03:36.970: INFO: Pod name wrapped-volume-race-7f3a1543-885b-11e9-b613-8a9bc7c14a19: Found 0 pods out of 5 +Jun 6 13:03:41.974: INFO: Pod name wrapped-volume-race-7f3a1543-885b-11e9-b613-8a9bc7c14a19: Found 5 pods out of 5 +STEP: Ensuring each pod is running +STEP: deleting ReplicationController wrapped-volume-race-7f3a1543-885b-11e9-b613-8a9bc7c14a19 in namespace emptydir-wrapper-2452, will wait for the garbage collector to delete the pods +Jun 6 13:03:54.058: INFO: Deleting ReplicationController wrapped-volume-race-7f3a1543-885b-11e9-b613-8a9bc7c14a19 took: 6.751225ms +Jun 6 13:03:54.459: INFO: Terminating ReplicationController 
wrapped-volume-race-7f3a1543-885b-11e9-b613-8a9bc7c14a19 pods took: 400.384233ms +STEP: Creating RC which spawns configmap-volume pods +Jun 6 13:04:35.871: INFO: Pod name wrapped-volume-race-a2581b1a-885b-11e9-b613-8a9bc7c14a19: Found 0 pods out of 5 +Jun 6 13:04:40.875: INFO: Pod name wrapped-volume-race-a2581b1a-885b-11e9-b613-8a9bc7c14a19: Found 5 pods out of 5 +STEP: Ensuring each pod is running +STEP: deleting ReplicationController wrapped-volume-race-a2581b1a-885b-11e9-b613-8a9bc7c14a19 in namespace emptydir-wrapper-2452, will wait for the garbage collector to delete the pods +Jun 6 13:04:52.948: INFO: Deleting ReplicationController wrapped-volume-race-a2581b1a-885b-11e9-b613-8a9bc7c14a19 took: 4.544282ms +Jun 6 13:04:53.348: INFO: Terminating ReplicationController wrapped-volume-race-a2581b1a-885b-11e9-b613-8a9bc7c14a19 pods took: 400.320826ms +STEP: Cleaning up the configMaps +[AfterEach] [sig-storage] EmptyDir wrapper volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:05:36.216: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-wrapper-2452" for this suite. +Jun 6 13:05:42.226: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:05:42.291: INFO: namespace emptydir-wrapper-2452 deletion completed in 6.072481729s + +• [SLOW TEST:184.323 seconds] +[sig-storage] EmptyDir wrapper volumes +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + should not cause race condition when used for configmaps [Serial] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:05:42.292: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-1575 +STEP: Waiting for a default service account to be provisioned in namespace +[It] volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test emptydir volume type on tmpfs +Jun 6 13:05:42.469: INFO: Waiting up to 5m0s for pod "pod-ca0b1453-885b-11e9-b613-8a9bc7c14a19" in namespace "emptydir-1575" to be "success or failure" +Jun 6 13:05:42.474: INFO: Pod "pod-ca0b1453-885b-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4.830812ms +Jun 6 13:05:44.477: INFO: Pod "pod-ca0b1453-885b-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007342534s +STEP: Saw pod success +Jun 6 13:05:44.477: INFO: Pod "pod-ca0b1453-885b-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:05:44.478: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-ca0b1453-885b-11e9-b613-8a9bc7c14a19 container test-container: +STEP: delete the pod +Jun 6 13:05:44.523: INFO: Waiting for pod pod-ca0b1453-885b-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:05:44.527: INFO: Pod pod-ca0b1453-885b-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:05:44.527: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-1575" for this suite. +Jun 6 13:05:50.537: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:05:50.662: INFO: namespace emptydir-1575 deletion completed in 6.132643041s + +• [SLOW TEST:8.370 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41 + volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:05:50.662: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename subpath +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in subpath-2792 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating pod pod-subpath-test-configmap-7msw +STEP: Creating a pod to test atomic-volume-subpath +Jun 6 13:05:50.798: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-7msw" in namespace "subpath-2792" to be "success or failure" +Jun 6 13:05:50.803: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Pending", Reason="", readiness=false. Elapsed: 4.338994ms +Jun 6 13:05:52.805: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.00686554s +Jun 6 13:05:54.809: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Running", Reason="", readiness=true. Elapsed: 4.010510961s +Jun 6 13:05:56.811: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Running", Reason="", readiness=true. Elapsed: 6.012987312s +Jun 6 13:05:58.820: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Running", Reason="", readiness=true. Elapsed: 8.021650417s +Jun 6 13:06:00.823: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Running", Reason="", readiness=true. Elapsed: 10.024215296s +Jun 6 13:06:02.825: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Running", Reason="", readiness=true. Elapsed: 12.026645409s +Jun 6 13:06:04.827: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Running", Reason="", readiness=true. Elapsed: 14.029074865s +Jun 6 13:06:06.830: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Running", Reason="", readiness=true. Elapsed: 16.031779689s +Jun 6 13:06:08.832: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Running", Reason="", readiness=true. Elapsed: 18.034008659s +Jun 6 13:06:10.835: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Running", Reason="", readiness=true. Elapsed: 20.03702063s +Jun 6 13:06:12.919: INFO: Pod "pod-subpath-test-configmap-7msw": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.120784509s +STEP: Saw pod success +Jun 6 13:06:12.919: INFO: Pod "pod-subpath-test-configmap-7msw" satisfied condition "success or failure" +Jun 6 13:06:12.921: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-subpath-test-configmap-7msw container test-container-subpath-configmap-7msw: +STEP: delete the pod +Jun 6 13:06:12.937: INFO: Waiting for pod pod-subpath-test-configmap-7msw to disappear +Jun 6 13:06:12.940: INFO: Pod pod-subpath-test-configmap-7msw no longer exists +STEP: Deleting pod pod-subpath-test-configmap-7msw +Jun 6 13:06:12.940: INFO: Deleting pod "pod-subpath-test-configmap-7msw" in namespace "subpath-2792" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:06:12.942: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-2792" for this suite. 
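The subpath test above mounts a single ConfigMap key over a file that already exists in the image. A rough hand-rolled equivalent (all names and the busybox tag are assumptions, not the suite's randomized fixtures):
```bash
kubectl create configmap subpath-demo-cfg --from-literal=hosts='127.0.0.1 localhost'
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: subpath-demo
spec:
  restartPolicy: Never
  containers:
  - name: test
    image: busybox:1.29
    command: ["cat", "/etc/hosts"]   # should print the ConfigMap content
    volumeMounts:
    - name: cfg
      mountPath: /etc/hosts          # existing file, shadowed via subPath
      subPath: hosts
  volumes:
  - name: cfg
    configMap:
      name: subpath-demo-cfg
EOF
```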
+Jun 6 13:06:18.952: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:06:19.016: INFO: namespace subpath-2792 deletion completed in 6.071886694s + +• [SLOW TEST:28.353 seconds] +[sig-storage] Subpath +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +[sig-api-machinery] CustomResourceDefinition resources Simple CustomResourceDefinition + creating/deleting custom resource definition objects works [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:06:19.016: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename custom-resource-definition +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in custom-resource-definition-77 +STEP: Waiting for a default service account to be provisioned in namespace +[It] creating/deleting custom resource definition objects works [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +Jun 6 13:06:19.150: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:06:20.184: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "custom-resource-definition-77" for this suite. 
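For the CRD round-trip just logged, a minimal definition against the `apiextensions.k8s.io/v1beta1` API (the version current in 1.14) looks like the following; the group and kind are placeholders rather than the randomized names the test uses:
```bash
kubectl apply -f - <<'EOF'
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: crontabs.stable.example.com
spec:
  group: stable.example.com
  version: v1
  scope: Namespaced
  names:
    plural: crontabs
    singular: crontab
    kind: CronTab
EOF
kubectl delete crd crontabs.stable.example.com   # deletion is the other half of the check
```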
+Jun 6 13:06:26.193: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:06:26.259: INFO: namespace custom-resource-definition-77 deletion completed in 6.073398817s + +• [SLOW TEST:7.244 seconds] +[sig-api-machinery] CustomResourceDefinition resources +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + Simple CustomResourceDefinition + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go:35 + creating/deleting custom resource definition objects works [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide container's memory request [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:06:26.260: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-161 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide container's memory request [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test downward API volume plugin +Jun 6 13:06:26.391: INFO: Waiting up to 5m0s for pod "downwardapi-volume-e438f0bb-885b-11e9-b613-8a9bc7c14a19" in namespace "downward-api-161" to be "success or failure" +Jun 6 13:06:26.397: INFO: Pod "downwardapi-volume-e438f0bb-885b-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.842465ms +Jun 6 13:06:28.399: INFO: Pod "downwardapi-volume-e438f0bb-885b-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.007858573s +STEP: Saw pod success +Jun 6 13:06:28.399: INFO: Pod "downwardapi-volume-e438f0bb-885b-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:06:28.401: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-e438f0bb-885b-11e9-b613-8a9bc7c14a19 container client-container: +STEP: delete the pod +Jun 6 13:06:28.418: INFO: Waiting for pod downwardapi-volume-e438f0bb-885b-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:06:28.421: INFO: Pod downwardapi-volume-e438f0bb-885b-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:06:28.421: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-161" for this suite. +Jun 6 13:06:34.429: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:06:34.571: INFO: namespace downward-api-161 deletion completed in 6.148118248s + +• [SLOW TEST:8.311 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide container's memory request [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:06:34.571: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-5829 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name configmap-test-volume-e92ce94c-885b-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test consume configMaps +Jun 6 13:06:34.702: INFO: Waiting up to 5m0s for pod "pod-configmaps-e92d395c-885b-11e9-b613-8a9bc7c14a19" in namespace "configmap-5829" to be "success or failure" +Jun 6 13:06:34.706: INFO: Pod "pod-configmaps-e92d395c-885b-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.619841ms +Jun 6 13:06:36.708: INFO: Pod "pod-configmaps-e92d395c-885b-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.00619301s +STEP: Saw pod success +Jun 6 13:06:36.708: INFO: Pod "pod-configmaps-e92d395c-885b-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:06:36.710: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-configmaps-e92d395c-885b-11e9-b613-8a9bc7c14a19 container configmap-volume-test: +STEP: delete the pod +Jun 6 13:06:36.727: INFO: Waiting for pod pod-configmaps-e92d395c-885b-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:06:36.729: INFO: Pod pod-configmaps-e92d395c-885b-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:06:36.730: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-5829" for this suite. +Jun 6 13:06:42.739: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:06:42.806: INFO: namespace configmap-5829 deletion completed in 6.074264634s + +• [SLOW TEST:8.235 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Docker Containers + should be able to override the image's default command and arguments [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Docker Containers + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:06:42.806: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename containers +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in containers-7069 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to override the image's default command and arguments [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test override all +Jun 6 13:06:42.937: INFO: Waiting up to 5m0s for pod "client-containers-ee15ecbc-885b-11e9-b613-8a9bc7c14a19" in namespace "containers-7069" to be "success or failure" +Jun 6 13:06:42.940: INFO: Pod "client-containers-ee15ecbc-885b-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.73169ms +Jun 6 13:06:44.942: INFO: Pod "client-containers-ee15ecbc-885b-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.005352179s +Jun 6 13:06:46.945: INFO: Pod "client-containers-ee15ecbc-885b-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.008174278s +STEP: Saw pod success +Jun 6 13:06:46.945: INFO: Pod "client-containers-ee15ecbc-885b-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:06:46.949: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod client-containers-ee15ecbc-885b-11e9-b613-8a9bc7c14a19 container test-container: +STEP: delete the pod +Jun 6 13:06:46.971: INFO: Waiting for pod client-containers-ee15ecbc-885b-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:06:46.973: INFO: Pod client-containers-ee15ecbc-885b-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [k8s.io] Docker Containers + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:06:46.973: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "containers-7069" for this suite. +Jun 6 13:06:52.993: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:06:53.058: INFO: namespace containers-7069 deletion completed in 6.075905445s + +• [SLOW TEST:10.251 seconds] +[k8s.io] Docker Containers +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should be able to override the image's default command and arguments [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] InitContainer [NodeConformance] + should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:06:53.058: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename init-container +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in init-container-6949 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43 +[It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating the pod +Jun 6 13:06:53.183: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:06:56.336: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-6949" for this suite. 
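The init-container check above relies on a documented guarantee: with `restartPolicy: Never`, a failing init container is not retried, the pod ends up in `Init:Error`, and the app container never starts. A sketch with assumed names and image:
```bash
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: init-fail-demo
spec:
  restartPolicy: Never
  initContainers:
  - name: init1
    image: busybox:1.29
    command: ["/bin/false"]     # always fails
  containers:
  - name: app
    image: busybox:1.29
    command: ["sleep", "3600"]  # must never run
EOF
kubectl get pod init-fail-demo  # expect STATUS Init:Error
```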
+Jun 6 13:07:02.346: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:07:02.413: INFO: namespace init-container-6949 deletion completed in 6.074015766s + +• [SLOW TEST:9.354 seconds] +[k8s.io] InitContainer [NodeConformance] +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run pod + should create a pod from an image when restart is Never [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:07:02.413: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-5616 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Kubectl run pod + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1583 +[It] should create a pod from an image when restart is Never [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 6 13:07:02.537: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 run e2e-test-nginx-pod --restart=Never --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=kubectl-5616' +Jun 6 13:07:02.790: INFO: stderr: "" +Jun 6 13:07:02.790: INFO: stdout: "pod/e2e-test-nginx-pod created\n" +STEP: verifying the pod e2e-test-nginx-pod was created +[AfterEach] [k8s.io] Kubectl run pod + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1588 +Jun 6 13:07:02.792: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete pods e2e-test-nginx-pod --namespace=kubectl-5616' +Jun 6 13:07:15.825: INFO: stderr: "" +Jun 6 13:07:15.825: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:07:15.825: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-5616" for this suite. 
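The command under test can be replayed verbatim with a 1.14-era kubectl (`--generator` was deprecated and removed in later releases); the point of the check is that `--restart=Never` with the `run-pod/v1` generator yields a bare Pod, not a managed workload:
```bash
kubectl run e2e-test-nginx-pod --restart=Never --generator=run-pod/v1 \
  --image=docker.io/library/nginx:1.14-alpine
kubectl get pod e2e-test-nginx-pod -o jsonpath='{.kind}'   # prints "Pod"
kubectl delete pod e2e-test-nginx-pod
```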
+Jun 6 13:07:21.835: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:07:21.958: INFO: namespace kubectl-5616 deletion completed in 6.130040153s + +• [SLOW TEST:19.545 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl run pod + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should create a pod from an image when restart is Never [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run --rm job + should create a job from an image, then delete the job [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:07:21.958: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-7527 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[It] should create a job from an image, then delete the job [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: executing a command with run --rm and attach with stdin +Jun 6 13:07:22.082: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 --namespace=kubectl-7527 run e2e-test-rm-busybox-job --image=docker.io/library/busybox:1.29 --rm=true --generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed'' +Jun 6 13:07:23.496: INFO: stderr: "kubectl run --generator=job/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\nIf you don't see a command prompt, try pressing enter.\n" +Jun 6 13:07:23.496: INFO: stdout: "abcd1234stdin closed\njob.batch \"e2e-test-rm-busybox-job\" deleted\n" +STEP: verifying the job e2e-test-rm-busybox-job was deleted +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:07:25.500: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-7527" for this suite. 
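The `--rm` behavior is easy to replay by hand: stdin is piped to the job's container, and the Job object is deleted once the attached session ends, which is exactly what the stdout captured above shows:
```bash
echo 'abcd1234' | kubectl run e2e-test-rm-busybox-job \
  --image=docker.io/library/busybox:1.29 --rm=true --generator=job/v1 \
  --restart=OnFailure --attach=true --stdin -- sh -c 'cat && echo stdin closed'
kubectl get jobs   # e2e-test-rm-busybox-job should be gone
```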
+Jun 6 13:07:37.510: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:07:37.756: INFO: namespace kubectl-7527 deletion completed in 12.254674127s + +• [SLOW TEST:15.798 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl run --rm job + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should create a job from an image, then delete the job [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +S +------------------------------ +[sig-node] ConfigMap + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-node] ConfigMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:07:37.757: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-1774 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap configmap-1774/configmap-test-0ed6de52-885c-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test consume configMaps +Jun 6 13:07:37.893: INFO: Waiting up to 5m0s for pod "pod-configmaps-0ed758d9-885c-11e9-b613-8a9bc7c14a19" in namespace "configmap-1774" to be "success or failure" +Jun 6 13:07:37.901: INFO: Pod "pod-configmaps-0ed758d9-885c-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 8.381584ms +Jun 6 13:07:39.904: INFO: Pod "pod-configmaps-0ed758d9-885c-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.010889701s +STEP: Saw pod success +Jun 6 13:07:39.904: INFO: Pod "pod-configmaps-0ed758d9-885c-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:07:39.906: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-configmaps-0ed758d9-885c-11e9-b613-8a9bc7c14a19 container env-test: +STEP: delete the pod +Jun 6 13:07:39.924: INFO: Waiting for pod pod-configmaps-0ed758d9-885c-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:07:39.926: INFO: Pod pod-configmaps-0ed758d9-885c-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-node] ConfigMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:07:39.926: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-1774" for this suite. 
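The environment-consumption test maps a ConfigMap key into a container environment variable via `configMapKeyRef`. A hand-rolled equivalent (key, names, and image are assumptions):
```bash
kubectl create configmap env-demo-cfg --from-literal=data-1=value-1
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: cm-env-demo
spec:
  restartPolicy: Never
  containers:
  - name: env-test
    image: busybox:1.29
    command: ["sh", "-c", "env | grep CONFIG_DATA_1"]
    env:
    - name: CONFIG_DATA_1
      valueFrom:
        configMapKeyRef:
          name: env-demo-cfg
          key: data-1
EOF
```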
+Jun 6 13:07:45.935: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:07:46.075: INFO: namespace configmap-1774 deletion completed in 6.147013404s + +• [SLOW TEST:8.319 seconds] +[sig-node] ConfigMap +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:32 + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +S +------------------------------ +[sig-storage] Secrets + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:07:46.076: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in secrets-3581 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating secret with name secret-test-13ccf313-885c-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test consume secrets +Jun 6 13:07:46.216: INFO: Waiting up to 5m0s for pod "pod-secrets-13cd640d-885c-11e9-b613-8a9bc7c14a19" in namespace "secrets-3581" to be "success or failure" +Jun 6 13:07:46.221: INFO: Pod "pod-secrets-13cd640d-885c-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.159889ms +Jun 6 13:07:48.223: INFO: Pod "pod-secrets-13cd640d-885c-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006644153s +STEP: Saw pod success +Jun 6 13:07:48.223: INFO: Pod "pod-secrets-13cd640d-885c-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:07:48.225: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-secrets-13cd640d-885c-11e9-b613-8a9bc7c14a19 container secret-volume-test: +STEP: delete the pod +Jun 6 13:07:48.245: INFO: Waiting for pod pod-secrets-13cd640d-885c-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:07:48.247: INFO: Pod pod-secrets-13cd640d-885c-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:07:48.247: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-3581" for this suite. 
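"Multiple volumes" in the test above means the same Secret mounted at two paths in one pod, so both mounts must resolve to identical content. A sketch with assumed names:
```bash
kubectl create secret generic demo-secret --from-literal=data-1=value-1
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: secret-two-mounts
spec:
  restartPolicy: Never
  containers:
  - name: test
    image: busybox:1.29
    command: ["sh", "-c", "cat /etc/secret-volume-1/data-1 /etc/secret-volume-2/data-1"]
    volumeMounts:
    - name: secret-volume-1
      mountPath: /etc/secret-volume-1
      readOnly: true
    - name: secret-volume-2
      mountPath: /etc/secret-volume-2
      readOnly: true
  volumes:
  - name: secret-volume-1
    secret:
      secretName: demo-secret
  - name: secret-volume-2
    secret:
      secretName: demo-secret
EOF
```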
+Jun 6 13:07:54.257: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:07:54.338: INFO: namespace secrets-3581 deletion completed in 6.088216146s + +• [SLOW TEST:8.261 seconds] +[sig-storage] Secrets +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33 + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:07:54.338: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in secrets-1159 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating secret with name secret-test-map-18b87cf6-885c-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test consume secrets +Jun 6 13:07:54.474: INFO: Waiting up to 5m0s for pod "pod-secrets-18b8e7fd-885c-11e9-b613-8a9bc7c14a19" in namespace "secrets-1159" to be "success or failure" +Jun 6 13:07:54.478: INFO: Pod "pod-secrets-18b8e7fd-885c-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.022835ms +Jun 6 13:07:56.519: INFO: Pod "pod-secrets-18b8e7fd-885c-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.044814834s +STEP: Saw pod success +Jun 6 13:07:56.519: INFO: Pod "pod-secrets-18b8e7fd-885c-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:07:56.521: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-secrets-18b8e7fd-885c-11e9-b613-8a9bc7c14a19 container secret-volume-test: +STEP: delete the pod +Jun 6 13:07:56.533: INFO: Waiting for pod pod-secrets-18b8e7fd-885c-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:07:56.535: INFO: Pod pod-secrets-18b8e7fd-885c-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:07:56.535: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-1159" for this suite. 
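A "mapping" remaps a Secret key to a different file name through `items`, and the container reads the remapped path. Assumed names throughout this sketch:
```bash
kubectl create secret generic demo-secret --from-literal=data-1=value-1
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: secret-mapped
spec:
  restartPolicy: Never
  containers:
  - name: test
    image: busybox:1.29
    command: ["cat", "/etc/secret-volume/new-path-data-1"]
    volumeMounts:
    - name: secret-volume
      mountPath: /etc/secret-volume
  volumes:
  - name: secret-volume
    secret:
      secretName: demo-secret
      items:
      - key: data-1
        path: new-path-data-1   # data-1 appears under this name instead
EOF
```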
+Jun 6 13:08:02.544: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:08:02.608: INFO: namespace secrets-1159 deletion completed in 6.07115168s + +• [SLOW TEST:8.270 seconds] +[sig-storage] Secrets +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Proxy server + should support proxy with --port 0 [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:08:02.608: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-6159 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[It] should support proxy with --port 0 [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: starting the proxy server +Jun 6 13:08:02.734: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-041581163 proxy -p 0 --disable-filter' +STEP: curling proxy /api/ output +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:08:02.790: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-6159" for this suite. 
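`-p 0` asks `kubectl proxy` to bind an ephemeral port and report it, which is what the test parses before curling `/api/`. By hand (the port below is only an example of what the proxy might print):
```bash
kubectl proxy -p 0 --disable-filter &   # --disable-filter is unsafe outside test setups
# e.g. "Starting to serve on 127.0.0.1:37651", then:
curl http://127.0.0.1:37651/api/
```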
+Jun 6 13:08:08.800: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:08:08.960: INFO: namespace kubectl-6159 deletion completed in 6.168199835s + +• [SLOW TEST:6.352 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Proxy server + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should support proxy with --port 0 [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Watchers + should be able to start watching from a specific resource version [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:08:08.961: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename watch +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in watch-6750 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to start watching from a specific resource version [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: modifying the configmap a second time +STEP: deleting the configmap +STEP: creating a watch on configmaps from the resource version returned by the first update +STEP: Expecting to observe notifications for all changes to the configmap after the first update +Jun 6 13:08:09.098: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:watch-6750,SelfLink:/api/v1/namespaces/watch-6750/configmaps/e2e-watch-test-resource-version,UID:216fdd87-885c-11e9-bdc9-0231d0af67bc,ResourceVersion:9122,Generation:0,CreationTimestamp:2019-06-06 13:08:09 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +Jun 6 13:08:09.098: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:watch-6750,SelfLink:/api/v1/namespaces/watch-6750/configmaps/e2e-watch-test-resource-version,UID:216fdd87-885c-11e9-bdc9-0231d0af67bc,ResourceVersion:9123,Generation:0,CreationTimestamp:2019-06-06 13:08:09 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: 
from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +[AfterEach] [sig-api-machinery] Watchers + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:08:09.098: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "watch-6750" for this suite. +Jun 6 13:08:15.107: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:08:15.178: INFO: namespace watch-6750 deletion completed in 6.077711707s + +• [SLOW TEST:6.217 seconds] +[sig-api-machinery] Watchers +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should be able to start watching from a specific resource version [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Secrets + should be consumable from pods in env vars [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-api-machinery] Secrets + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:08:15.179: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in secrets-3003 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in env vars [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating secret with name secret-test-252beedf-885c-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test consume secrets +Jun 6 13:08:15.358: INFO: Waiting up to 5m0s for pod "pod-secrets-252c500b-885c-11e9-b613-8a9bc7c14a19" in namespace "secrets-3003" to be "success or failure" +Jun 6 13:08:15.363: INFO: Pod "pod-secrets-252c500b-885c-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.816041ms +Jun 6 13:08:17.365: INFO: Pod "pod-secrets-252c500b-885c-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.007547756s +STEP: Saw pod success +Jun 6 13:08:17.366: INFO: Pod "pod-secrets-252c500b-885c-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:08:17.367: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-secrets-252c500b-885c-11e9-b613-8a9bc7c14a19 container secret-env-test: +STEP: delete the pod +Jun 6 13:08:17.381: INFO: Waiting for pod pod-secrets-252c500b-885c-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:08:17.382: INFO: Pod pod-secrets-252c500b-885c-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-api-machinery] Secrets + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:08:17.383: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-3003" for this suite. +Jun 6 13:08:23.393: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:08:23.461: INFO: namespace secrets-3003 deletion completed in 6.076110388s + +• [SLOW TEST:8.282 seconds] +[sig-api-machinery] Secrets +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:32 + should be consumable from pods in env vars [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:08:23.461: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-433 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name projected-configmap-test-volume-2a147086-885c-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test consume configMaps +Jun 6 13:08:23.594: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-2a14e43e-885c-11e9-b613-8a9bc7c14a19" in namespace "projected-433" to be "success or failure" +Jun 6 13:08:23.598: INFO: Pod "pod-projected-configmaps-2a14e43e-885c-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.29248ms +Jun 6 13:08:25.620: INFO: Pod "pod-projected-configmaps-2a14e43e-885c-11e9-b613-8a9bc7c14a19": Phase="Running", Reason="", readiness=true. Elapsed: 2.026074778s +Jun 6 13:08:27.623: INFO: Pod "pod-projected-configmaps-2a14e43e-885c-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.028916689s +STEP: Saw pod success +Jun 6 13:08:27.623: INFO: Pod "pod-projected-configmaps-2a14e43e-885c-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:08:27.625: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-configmaps-2a14e43e-885c-11e9-b613-8a9bc7c14a19 container projected-configmap-volume-test: +STEP: delete the pod +Jun 6 13:08:27.639: INFO: Waiting for pod pod-projected-configmaps-2a14e43e-885c-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:08:27.642: INFO: Pod pod-projected-configmaps-2a14e43e-885c-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:08:27.642: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-433" for this suite. +Jun 6 13:08:33.652: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:08:33.756: INFO: namespace projected-433 deletion completed in 6.112192777s + +• [SLOW TEST:10.295 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33 + should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + Burst scaling should run to completion even with unhealthy pods [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:08:33.756: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename statefulset +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in statefulset-9953 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace statefulset-9953 +[It] Burst scaling should run to completion even with unhealthy pods [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating stateful set ss in namespace statefulset-9953 +STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-9953 +Jun 6 13:08:33.917: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Pending - 
Ready=false +Jun 6 13:08:43.921: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod +Jun 6 13:08:43.923: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 6 13:08:44.132: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n" +Jun 6 13:08:44.132: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 6 13:08:44.132: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 6 13:08:44.135: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true +Jun 6 13:08:54.137: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Jun 6 13:08:54.137: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 6 13:08:54.150: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 6 13:08:54.150: INFO: ss-0 ip-172-16-66-200.ec2.internal Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC }] +Jun 6 13:08:54.150: INFO: ss-1 Pending [] +Jun 6 13:08:54.150: INFO: +Jun 6 13:08:54.150: INFO: StatefulSet ss has not reached scale 3, at 2 +Jun 6 13:08:55.153: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.994228345s +Jun 6 13:08:56.155: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.991857721s +Jun 6 13:08:57.158: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.989511881s +Jun 6 13:08:58.161: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.98631711s +Jun 6 13:08:59.164: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.98322035s +Jun 6 13:09:00.167: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.98001107s +Jun 6 13:09:01.170: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.977444164s +Jun 6 13:09:02.173: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.974931617s +Jun 6 13:09:03.176: INFO: Verifying statefulset ss doesn't scale past 3 for another 971.766396ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-9953 +Jun 6 13:09:04.179: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:09:04.385: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n" +Jun 6 13:09:04.385: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 6 13:09:04.385: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 6 13:09:04.385: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:09:04.708: INFO: 
stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" +Jun 6 13:09:04.708: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 6 13:09:04.708: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 6 13:09:04.708: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:09:04.994: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" +Jun 6 13:09:04.994: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +Jun 6 13:09:04.994: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-2: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +Jun 6 13:09:04.996: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +Jun 6 13:09:04.996: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +Jun 6 13:09:04.996: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Scale down will not halt with unhealthy stateful pod +Jun 6 13:09:04.998: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 6 13:09:05.227: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n" +Jun 6 13:09:05.227: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 6 13:09:05.227: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 6 13:09:05.227: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 6 13:09:05.436: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n" +Jun 6 13:09:05.436: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 6 13:09:05.436: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 6 13:09:05.436: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-2 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +Jun 6 13:09:05.651: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n" +Jun 6 13:09:05.651: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +Jun 6 13:09:05.651: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +Jun 6 13:09:05.651: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 6 13:09:05.654: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 +Jun 6 13:09:15.658: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Jun 6 13:09:15.658: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false +Jun 6 13:09:15.658: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false +Jun 6 
13:09:15.664: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 6 13:09:15.664: INFO: ss-0 ip-172-16-66-200.ec2.internal Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC }] +Jun 6 13:09:15.664: INFO: ss-1 ip-172-16-89-18.ec2.internal Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:15.664: INFO: ss-2 ip-172-16-66-200.ec2.internal Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:15.664: INFO: +Jun 6 13:09:15.664: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 6 13:09:16.667: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 6 13:09:16.667: INFO: ss-0 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC }] +Jun 6 13:09:16.667: INFO: ss-1 ip-172-16-89-18.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:16.667: INFO: ss-2 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:16.667: INFO: +Jun 6 13:09:16.667: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 6 13:09:17.670: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 6 13:09:17.670: INFO: ss-0 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 
2019-06-06 13:08:33 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC }] +Jun 6 13:09:17.670: INFO: ss-1 ip-172-16-89-18.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:17.670: INFO: ss-2 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:17.670: INFO: +Jun 6 13:09:17.670: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 6 13:09:18.675: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 6 13:09:18.675: INFO: ss-0 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC }] +Jun 6 13:09:18.675: INFO: ss-1 ip-172-16-89-18.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:18.675: INFO: ss-2 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:18.675: INFO: +Jun 6 13:09:18.675: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 6 13:09:19.678: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 6 13:09:19.678: INFO: ss-0 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: 
[nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC }] +Jun 6 13:09:19.678: INFO: ss-1 ip-172-16-89-18.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:19.678: INFO: ss-2 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:19.678: INFO: +Jun 6 13:09:19.678: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 6 13:09:20.681: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 6 13:09:20.681: INFO: ss-0 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC }] +Jun 6 13:09:20.681: INFO: ss-1 ip-172-16-89-18.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:20.682: INFO: ss-2 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:20.682: INFO: +Jun 6 13:09:20.682: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 6 13:09:21.684: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 6 13:09:21.684: INFO: ss-0 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} 
{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC }] +Jun 6 13:09:21.684: INFO: ss-1 ip-172-16-89-18.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:21.684: INFO: ss-2 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:21.685: INFO: +Jun 6 13:09:21.685: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 6 13:09:22.688: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 6 13:09:22.688: INFO: ss-0 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC }] +Jun 6 13:09:22.688: INFO: ss-1 ip-172-16-89-18.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:22.688: INFO: ss-2 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:22.688: INFO: +Jun 6 13:09:22.688: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 6 13:09:23.691: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 6 13:09:23.691: INFO: ss-0 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC }] +Jun 6 13:09:23.692: INFO: ss-1 ip-172-16-89-18.ec2.internal Running 30s 
[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:23.692: INFO: ss-2 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:23.692: INFO: +Jun 6 13:09:23.692: INFO: StatefulSet ss has not reached scale 0, at 3 +Jun 6 13:09:24.694: INFO: POD NODE PHASE GRACE CONDITIONS +Jun 6 13:09:24.694: INFO: ss-0 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:05 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:33 +0000 UTC }] +Jun 6 13:09:24.694: INFO: ss-2 ip-172-16-66-200.ec2.internal Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:09:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:08:54 +0000 UTC }] +Jun 6 13:09:24.695: INFO: +Jun 6 13:09:24.695: INFO: StatefulSet ss has not reached scale 0, at 2 +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-9953 +Jun 6 13:09:25.697: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:09:25.843: INFO: rc: 1 +Jun 6 13:09:25.843: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc00279f440 exit status 1 true [0xc00208b760 0xc00208b778 0xc00208b790] [0xc00208b760 0xc00208b778 0xc00208b790] [0xc00208b770 0xc00208b788] [0x9c00a0 0x9c00a0] 0xc002690960 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:09:35.843: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:09:35.906: INFO: rc: 1 +Jun 6 13:09:35.906: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl 
[kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002966690 exit status 1 true [0xc00291b4f8 0xc00291b510 0xc00291b528] [0xc00291b4f8 0xc00291b510 0xc00291b528] [0xc00291b508 0xc00291b520] [0x9c00a0 0x9c00a0] 0xc002764540 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:09:45.906: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:09:45.960: INFO: rc: 1 +Jun 6 13:09:45.960: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc0029669c0 exit status 1 true [0xc00291b530 0xc00291b548 0xc00291b560] [0xc00291b530 0xc00291b548 0xc00291b560] [0xc00291b540 0xc00291b558] [0x9c00a0 0x9c00a0] 0xc0027648a0 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:09:55.960: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:09:56.016: INFO: rc: 1 +Jun 6 13:09:56.016: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002eac300 exit status 1 true [0xc0008d8088 0xc0008d8638 0xc0008d8760] [0xc0008d8088 0xc0008d8638 0xc0008d8760] [0xc0008d8540 0xc0008d8730] [0x9c00a0 0x9c00a0] 0xc002a502a0 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:10:06.016: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:10:06.077: INFO: rc: 1 +Jun 6 13:10:06.077: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002fa2330 exit status 1 true [0xc0000100d0 0xc0002718f0 0xc000271aa0] [0xc0000100d0 0xc0002718f0 0xc000271aa0] [0xc000270000 0xc000271a20] [0x9c00a0 0x9c00a0] 0xc002dae360 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:10:16.077: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:10:16.175: INFO: rc: 1 +Jun 6 13:10:16.176: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ 
|| true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002eac780 exit status 1 true [0xc0008d87d8 0xc0008d9180 0xc0008d93d8] [0xc0008d87d8 0xc0008d9180 0xc0008d93d8] [0xc0008d90c0 0xc0008d9308] [0x9c00a0 0x9c00a0] 0xc002a50600 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:10:26.176: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:10:26.230: INFO: rc: 1 +Jun 6 13:10:26.231: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002eacae0 exit status 1 true [0xc0008d9450 0xc0008d95e8 0xc0008d9908] [0xc0008d9450 0xc0008d95e8 0xc0008d9908] [0xc0008d9508 0xc0008d97b0] [0x9c00a0 0x9c00a0] 0xc002a50960 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:10:36.231: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:10:36.284: INFO: rc: 1 +Jun 6 13:10:36.285: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002eace40 exit status 1 true [0xc0008d9a28 0xc0008d9cc8 0xc0008d9db8] [0xc0008d9a28 0xc0008d9cc8 0xc0008d9db8] [0xc0008d9c38 0xc0008d9da8] [0x9c00a0 0x9c00a0] 0xc002a50cc0 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:10:46.285: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:10:46.337: INFO: rc: 1 +Jun 6 13:10:46.338: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002ead1a0 exit status 1 true [0xc0008d9de8 0xc0008d9fb8 0xc0011b8280] [0xc0008d9de8 0xc0008d9fb8 0xc0011b8280] [0xc0008d9f98 0xc0011b8168] [0x9c00a0 0x9c00a0] 0xc002a51080 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:10:56.338: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:10:56.394: INFO: rc: 1 +Jun 6 13:10:56.394: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002ead500 exit status 1 true [0xc0011b8448 0xc0011b8a80 0xc0011b9078] 
[0xc0011b8448 0xc0011b8a80 0xc0011b9078] [0xc0011b86e8 0xc0011b8e70] [0x9c00a0 0x9c00a0] 0xc002a515c0 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:11:06.395: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:11:06.466: INFO: rc: 1 +Jun 6 13:11:06.466: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002ead860 exit status 1 true [0xc0011b91e0 0xc0011b9280 0xc0011b93c8] [0xc0011b91e0 0xc0011b9280 0xc0011b93c8] [0xc0011b9238 0xc0011b9390] [0x9c00a0 0x9c00a0] 0xc002a51b00 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:11:16.466: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:11:16.621: INFO: rc: 1 +Jun 6 13:11:16.621: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002eadbc0 exit status 1 true [0xc0011b93f0 0xc0011b9548 0xc0011b95e0] [0xc0011b93f0 0xc0011b9548 0xc0011b95e0] [0xc0011b9528 0xc0011b9598] [0x9c00a0 0x9c00a0] 0xc002d7e000 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:11:26.621: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:11:26.676: INFO: rc: 1 +Jun 6 13:11:26.677: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002eadf20 exit status 1 true [0xc0011b96f8 0xc0011b98b0 0xc0011b99d0] [0xc0011b96f8 0xc0011b98b0 0xc0011b99d0] [0xc0011b9840 0xc0011b9988] [0x9c00a0 0x9c00a0] 0xc002d7e600 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:11:36.677: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:11:36.732: INFO: rc: 1 +Jun 6 13:11:36.732: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002fa26c0 exit status 1 true [0xc000271cc0 0xc001234030 0xc001234070] [0xc000271cc0 0xc001234030 0xc001234070] [0xc001234018 0xc001234068] [0x9c00a0 0x9c00a0] 0xc002dae7e0 }: +Command stdout: + +stderr: +Error from 
server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:11:46.732: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:11:46.788: INFO: rc: 1 +Jun 6 13:11:46.788: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002fa2b70 exit status 1 true [0xc001234078 0xc0012340b8 0xc001234170] [0xc001234078 0xc0012340b8 0xc001234170] [0xc0012340b0 0xc001234100] [0x9c00a0 0x9c00a0] 0xc002daec00 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:11:56.789: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:11:56.847: INFO: rc: 1 +Jun 6 13:11:56.847: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc00313c270 exit status 1 true [0xc0011b9a08 0xc0011b9a98 0xc0011b9b58] [0xc0011b9a08 0xc0011b9a98 0xc0011b9b58] [0xc0011b9a80 0xc0011b9b10] [0x9c00a0 0x9c00a0] 0xc002d7ea80 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:12:06.847: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:12:06.905: INFO: rc: 1 +Jun 6 13:12:06.905: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc00313c5a0 exit status 1 true [0xc000270000 0xc000271a20 0xc000271ef0] [0xc000270000 0xc000271a20 0xc000271ef0] [0xc000271990 0xc000271cc0] [0x9c00a0 0x9c00a0] 0xc002a502a0 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:12:16.906: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:12:16.962: INFO: rc: 1 +Jun 6 13:12:16.963: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc00313ca20 exit status 1 true [0xc0000100d0 0xc0008d8088 0xc0008d8638] [0xc0000100d0 0xc0008d8088 0xc0008d8638] [0xc0008d8010 0xc0008d8540] [0x9c00a0 0x9c00a0] 0xc002a50600 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:12:26.963: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:12:27.018: INFO: rc: 1 +Jun 6 13:12:27.018: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002eac360 exit status 1 true [0xc0011b80c0 0xc0011b8448 0xc0011b8a80] [0xc0011b80c0 0xc0011b8448 0xc0011b8a80] [0xc0011b8280 0xc0011b86e8] [0x9c00a0 0x9c00a0] 0xc002d7e4e0 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:12:37.018: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:12:37.071: INFO: rc: 1 +Jun 6 13:12:37.072: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002eac840 exit status 1 true [0xc0011b8d00 0xc0011b91e0 0xc0011b9280] [0xc0011b8d00 0xc0011b91e0 0xc0011b9280] [0xc0011b9078 0xc0011b9238] [0x9c00a0 0x9c00a0] 0xc002d7ed80 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:12:47.072: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:12:47.127: INFO: rc: 1 +Jun 6 13:12:47.127: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc00313cdb0 exit status 1 true [0xc0008d8710 0xc0008d87d8 0xc0008d9180] [0xc0008d8710 0xc0008d87d8 0xc0008d9180] [0xc0008d8760 0xc0008d90c0] [0x9c00a0 0x9c00a0] 0xc002a50960 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:12:57.127: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:12:57.188: INFO: rc: 1 +Jun 6 13:12:57.188: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002eacbd0 exit status 1 true [0xc0011b92a0 0xc0011b93f0 0xc0011b9548] [0xc0011b92a0 0xc0011b93f0 0xc0011b9548] [0xc0011b93c8 0xc0011b9528] [0x9c00a0 0x9c00a0] 0xc002d7f200 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:13:07.189: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' 
+Jun 6 13:13:07.326: INFO: rc: 1 +Jun 6 13:13:07.327: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002eacf60 exit status 1 true [0xc0011b9560 0xc0011b96f8 0xc0011b98b0] [0xc0011b9560 0xc0011b96f8 0xc0011b98b0] [0xc0011b95e0 0xc0011b9840] [0x9c00a0 0x9c00a0] 0xc002d7f680 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:13:17.327: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:13:17.385: INFO: rc: 1 +Jun 6 13:13:17.385: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc00313d110 exit status 1 true [0xc0008d92d8 0xc0008d9450 0xc0008d95e8] [0xc0008d92d8 0xc0008d9450 0xc0008d95e8] [0xc0008d93d8 0xc0008d9508] [0x9c00a0 0x9c00a0] 0xc002a50cc0 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:13:27.385: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:13:27.441: INFO: rc: 1 +Jun 6 13:13:27.441: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc00313d440 exit status 1 true [0xc0008d95f8 0xc0008d9a28 0xc0008d9cc8] [0xc0008d95f8 0xc0008d9a28 0xc0008d9cc8] [0xc0008d9908 0xc0008d9c38] [0x9c00a0 0x9c00a0] 0xc002a51080 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:13:37.441: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:13:37.503: INFO: rc: 1 +Jun 6 13:13:37.503: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002ead320 exit status 1 true [0xc0011b9910 0xc0011b9bf8 0xc0011b9cc0] [0xc0011b9910 0xc0011b9bf8 0xc0011b9cc0] [0xc0011b99d0 0xc0011b9c60] [0x9c00a0 0x9c00a0] 0xc002d7fb00 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:13:47.503: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:13:47.601: INFO: rc: 1 +Jun 6 13:13:47.601: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl 
--kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002ead6b0 exit status 1 true [0xc0011b9d20 0xc0011b9e08 0xc0011b9ef0] [0xc0011b9d20 0xc0011b9e08 0xc0011b9ef0] [0xc0011b9d90 0xc0011b9ee8] [0x9c00a0 0x9c00a0] 0xc002dae000 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:13:57.601: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:13:57.656: INFO: rc: 1 +Jun 6 13:13:57.656: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc00313c330 exit status 1 true [0xc0000100e8 0xc000271990 0xc000271cc0] [0xc0000100e8 0xc000271990 0xc000271cc0] [0xc0002718f0 0xc000271aa0] [0x9c00a0 0x9c00a0] 0xc002d7e4e0 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:14:07.657: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:14:07.712: INFO: rc: 1 +Jun 6 13:14:07.712: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc002eac300 exit status 1 true [0xc0008d8010 0xc0008d8540 0xc0008d8730] [0xc0008d8010 0xc0008d8540 0xc0008d8730] [0xc0008d84d0 0xc0008d8710] [0x9c00a0 0x9c00a0] 0xc002a502a0 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:14:17.712: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:14:17.770: INFO: rc: 1 +Jun 6 13:14:17.770: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] [] Error from server (NotFound): pods "ss-0" not found + [] 0xc00313c6c0 exit status 1 true [0xc000271ef0 0xc0011b8280 0xc0011b86e8] [0xc000271ef0 0xc0011b8280 0xc0011b86e8] [0xc0011b8168 0xc0011b85e8] [0x9c00a0 0x9c00a0] 0xc002d7e960 }: +Command stdout: + +stderr: +Error from server (NotFound): pods "ss-0" not found + +error: +exit status 1 + +Jun 6 13:14:27.770: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-9953 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +Jun 6 13:14:27.825: INFO: rc: 1 +Jun 6 13:14:27.825: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: +Jun 6 13:14:27.825: INFO: Scaling statefulset ss to 0 +Jun 6 13:14:27.831: INFO: Waiting for statefulset status.replicas updated to 0 +[AfterEach] [k8s.io] Basic 
StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +Jun 6 13:14:27.833: INFO: Deleting all statefulset in ns statefulset-9953 +Jun 6 13:14:27.834: INFO: Scaling statefulset ss to 0 +Jun 6 13:14:27.840: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 6 13:14:27.842: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:14:27.851: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-9953" for this suite. +Jun 6 13:14:33.869: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:14:33.966: INFO: namespace statefulset-9953 deletion completed in 6.111366454s + +• [SLOW TEST:360.210 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + Burst scaling should run to completion even with unhealthy pods [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:14:33.966: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in secrets-9389 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating secret with name secret-test-06f3b19c-885d-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test consume secrets +Jun 6 13:14:34.157: INFO: Waiting up to 5m0s for pod "pod-secrets-06f410a6-885d-11e9-b613-8a9bc7c14a19" in namespace "secrets-9389" to be "success or failure" +Jun 6 13:14:34.162: INFO: Pod "pod-secrets-06f410a6-885d-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.779664ms +Jun 6 13:14:36.164: INFO: Pod "pod-secrets-06f410a6-885d-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006828061s +Jun 6 13:14:38.166: INFO: Pod "pod-secrets-06f410a6-885d-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.009394584s +STEP: Saw pod success +Jun 6 13:14:38.166: INFO: Pod "pod-secrets-06f410a6-885d-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:14:38.168: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-secrets-06f410a6-885d-11e9-b613-8a9bc7c14a19 container secret-volume-test: +STEP: delete the pod +Jun 6 13:14:38.195: INFO: Waiting for pod pod-secrets-06f410a6-885d-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:14:38.197: INFO: Pod pod-secrets-06f410a6-885d-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:14:38.197: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-9389" for this suite. +Jun 6 13:14:44.213: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:14:44.279: INFO: namespace secrets-9389 deletion completed in 6.079977878s + +• [SLOW TEST:10.313 seconds] +[sig-storage] Secrets +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33 + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Container Runtime blackbox test when starting a container that exits + should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Container Runtime + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:14:44.279: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename container-runtime +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-runtime-640 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpa': should get the expected 'State' +STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpof': should get the expected 'State' +STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpn': should get the expected 
'RestartCount' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpn': should get the expected 'State' +STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance] +[AfterEach] [k8s.io] Container Runtime + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:15:09.573: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-runtime-640" for this suite. +Jun 6 13:15:15.582: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:15:15.652: INFO: namespace container-runtime-640 deletion completed in 6.076499728s + +• [SLOW TEST:31.373 seconds] +[k8s.io] Container Runtime +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + blackbox test + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:37 + when starting a container that exits + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:38 + should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Probing container + should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:15:15.653: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename container-probe +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-probe-7224 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51 +[It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating pod liveness-exec in namespace container-probe-7224 +Jun 6 13:15:17.789: INFO: Started pod liveness-exec in namespace container-probe-7224 +STEP: checking the pod's current state and verifying that restartCount is present +Jun 6 13:15:17.792: INFO: Initial restart count of pod liveness-exec is 0 +Jun 6 13:16:07.940: INFO: Restart count of pod container-probe-7224/liveness-exec is now 1 (50.148581892s elapsed) +STEP: deleting the pod 
+[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:16:07.949: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-7224" for this suite. +Jun 6 13:16:13.960: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:16:14.054: INFO: namespace container-probe-7224 deletion completed in 6.101046818s + +• [SLOW TEST:58.401 seconds] +[k8s.io] Probing container +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:16:14.057: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename subpath +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in subpath-3468 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating pod pod-subpath-test-secret-tk29 +STEP: Creating a pod to test atomic-volume-subpath +Jun 6 13:16:14.193: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-tk29" in namespace "subpath-3468" to be "success or failure" +Jun 6 13:16:14.198: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Pending", Reason="", readiness=false. Elapsed: 4.574983ms +Jun 6 13:16:16.200: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Running", Reason="", readiness=true. Elapsed: 2.007028657s +Jun 6 13:16:18.202: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Running", Reason="", readiness=true. Elapsed: 4.009408332s +Jun 6 13:16:20.205: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Running", Reason="", readiness=true. Elapsed: 6.011902535s +Jun 6 13:16:22.208: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Running", Reason="", readiness=true. Elapsed: 8.014698115s +Jun 6 13:16:24.211: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Running", Reason="", readiness=true. Elapsed: 10.017510634s +Jun 6 13:16:26.213: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Running", Reason="", readiness=true. 
Elapsed: 12.020151044s +Jun 6 13:16:28.216: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Running", Reason="", readiness=true. Elapsed: 14.023145745s +Jun 6 13:16:30.219: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Running", Reason="", readiness=true. Elapsed: 16.025756043s +Jun 6 13:16:32.222: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Running", Reason="", readiness=true. Elapsed: 18.02866679s +Jun 6 13:16:34.224: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Running", Reason="", readiness=true. Elapsed: 20.031215674s +Jun 6 13:16:36.226: INFO: Pod "pod-subpath-test-secret-tk29": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.033373951s +STEP: Saw pod success +Jun 6 13:16:36.226: INFO: Pod "pod-subpath-test-secret-tk29" satisfied condition "success or failure" +Jun 6 13:16:36.228: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-subpath-test-secret-tk29 container test-container-subpath-secret-tk29: +STEP: delete the pod +Jun 6 13:16:36.242: INFO: Waiting for pod pod-subpath-test-secret-tk29 to disappear +Jun 6 13:16:36.244: INFO: Pod pod-subpath-test-secret-tk29 no longer exists +STEP: Deleting pod pod-subpath-test-secret-tk29 +Jun 6 13:16:36.244: INFO: Deleting pod "pod-subpath-test-secret-tk29" in namespace "subpath-3468" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:16:36.246: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-3468" for this suite. +Jun 6 13:16:42.254: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:16:42.324: INFO: namespace subpath-3468 deletion completed in 6.076651112s + +• [SLOW TEST:28.267 seconds] +[sig-storage] Subpath +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:16:42.325: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-3646 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 
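Before the downward API run continues below, note what the subpath test above exercised: projecting a single Secret key to a fixed path via `subPath`. A minimal sketch under assumed names and payload:

```bash
# Sketch: mount one key of a Secret at a subPath, as the atomic-writer subpath
# test does. Secret name, key, and payload are illustrative assumptions.
kubectl create secret generic subpath-demo --from-literal=key1=hello
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: secret-subpath-demo
spec:
  restartPolicy: Never
  containers:
  - name: reader
    image: busybox
    command: ["cat", "/mnt/key1"]
    volumeMounts:
    - name: secret-vol
      mountPath: /mnt/key1
      subPath: key1
  volumes:
  - name: secret-vol
    secret:
      secretName: subpath-demo
EOF
kubectl logs secret-subpath-demo   # prints "hello" once the container has run
```

A subPath mount binds a single file, so unlike a whole-volume secret mount it does not pick up later updates to the Secret; that is the atomic-writer edge this group of tests probes.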
+[It] should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test downward API volume plugin +Jun 6 13:16:42.454: INFO: Waiting up to 5m0s for pod "downwardapi-volume-536cbecc-885d-11e9-b613-8a9bc7c14a19" in namespace "projected-3646" to be "success or failure" +Jun 6 13:16:42.459: INFO: Pod "downwardapi-volume-536cbecc-885d-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.46955ms +Jun 6 13:16:44.463: INFO: Pod "downwardapi-volume-536cbecc-885d-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009071652s +STEP: Saw pod success +Jun 6 13:16:44.463: INFO: Pod "downwardapi-volume-536cbecc-885d-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:16:44.465: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-536cbecc-885d-11e9-b613-8a9bc7c14a19 container client-container: +STEP: delete the pod +Jun 6 13:16:44.479: INFO: Waiting for pod downwardapi-volume-536cbecc-885d-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:16:44.481: INFO: Pod downwardapi-volume-536cbecc-885d-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:16:44.482: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-3646" for this suite. +Jun 6 13:16:50.491: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:16:50.565: INFO: namespace projected-3646 deletion completed in 6.081144761s + +• [SLOW TEST:8.240 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +[sig-apps] Daemon set [Serial] + should run and stop complex daemon [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:16:50.565: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename daemonsets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in daemonsets-5772 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102 +[It] should run and stop complex daemon [Conformance] + 
/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +Jun 6 13:16:50.702: INFO: Creating daemon "daemon-set" with a node selector +STEP: Initially, daemon pods should not be running on any nodes. +Jun 6 13:16:50.708: INFO: Number of nodes with available pods: 0 +Jun 6 13:16:50.708: INFO: Number of running nodes: 0, number of available pods: 0 +STEP: Change node label to blue, check that daemon pod is launched. +Jun 6 13:16:50.725: INFO: Number of nodes with available pods: 0 +Jun 6 13:16:50.725: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:16:51.727: INFO: Number of nodes with available pods: 0 +Jun 6 13:16:51.727: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:16:52.728: INFO: Number of nodes with available pods: 0 +Jun 6 13:16:52.728: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:16:53.734: INFO: Number of nodes with available pods: 1 +Jun 6 13:16:53.734: INFO: Number of running nodes: 1, number of available pods: 1 +STEP: Update the node label to green, and wait for daemons to be unscheduled +Jun 6 13:16:53.838: INFO: Number of nodes with available pods: 1 +Jun 6 13:16:53.838: INFO: Number of running nodes: 0, number of available pods: 1 +Jun 6 13:16:54.840: INFO: Number of nodes with available pods: 0 +Jun 6 13:16:54.840: INFO: Number of running nodes: 0, number of available pods: 0 +STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate +Jun 6 13:16:54.846: INFO: Number of nodes with available pods: 0 +Jun 6 13:16:54.846: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:16:55.849: INFO: Number of nodes with available pods: 0 +Jun 6 13:16:55.849: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:16:56.849: INFO: Number of nodes with available pods: 0 +Jun 6 13:16:56.849: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:16:57.849: INFO: Number of nodes with available pods: 0 +Jun 6 13:16:57.849: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:16:58.849: INFO: Number of nodes with available pods: 0 +Jun 6 13:16:58.849: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:16:59.849: INFO: Number of nodes with available pods: 0 +Jun 6 13:16:59.849: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:17:00.849: INFO: Number of nodes with available pods: 0 +Jun 6 13:17:00.849: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:17:01.849: INFO: Number of nodes with available pods: 0 +Jun 6 13:17:01.849: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:17:02.850: INFO: Number of nodes with available pods: 0 +Jun 6 13:17:02.850: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:17:03.849: INFO: Number of nodes with available pods: 0 +Jun 6 13:17:03.849: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:17:04.849: INFO: Number of nodes with available pods: 0 +Jun 6 13:17:04.849: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:17:05.859: INFO: Number of nodes with available pods: 0 +Jun 6 13:17:05.859: INFO: Node 
ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:17:06.849: INFO: Number of nodes with available pods: 0 +Jun 6 13:17:06.849: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:17:07.849: INFO: Number of nodes with available pods: 0 +Jun 6 13:17:07.849: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod +Jun 6 13:17:08.849: INFO: Number of nodes with available pods: 1 +Jun 6 13:17:08.850: INFO: Number of running nodes: 1, number of available pods: 1 +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-5772, will wait for the garbage collector to delete the pods +Jun 6 13:17:08.908: INFO: Deleting DaemonSet.extensions daemon-set took: 3.485261ms +Jun 6 13:17:09.309: INFO: Terminating DaemonSet.extensions daemon-set pods took: 400.270878ms +Jun 6 13:17:12.411: INFO: Number of nodes with available pods: 0 +Jun 6 13:17:12.411: INFO: Number of running nodes: 0, number of available pods: 0 +Jun 6 13:17:12.414: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-5772/daemonsets","resourceVersion":"10415"},"items":null} + +Jun 6 13:17:12.415: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-5772/pods","resourceVersion":"10415"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:17:12.432: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-5772" for this suite. 
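The polling above is the DaemonSet controller reacting to node labels: pods are scheduled only where the selector matches, drain when the label flips away, and return once the selector is updated. A rough equivalent of that label dance, with the DaemonSet name and label key as illustrative assumptions (the node name is taken from this log):

```bash
# Sketch: a DaemonSet confined by nodeSelector, plus the blue/green relabeling
# the test performs. Names and the label key are illustrative assumptions.
kubectl apply -f - <<'EOF'
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemon-set-demo
spec:
  selector:
    matchLabels:
      app: daemon-set-demo
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: daemon-set-demo
    spec:
      nodeSelector:
        color: blue
      containers:
      - name: app
        image: docker.io/library/nginx:1.14-alpine
EOF
kubectl label node ip-172-16-66-200.ec2.internal color=blue               # daemon pod lands here
kubectl label node ip-172-16-66-200.ec2.internal color=green --overwrite  # daemon pod drains
```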
+Jun 6 13:17:18.442: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:17:18.555: INFO: namespace daemonsets-5772 deletion completed in 6.119917018s + +• [SLOW TEST:27.989 seconds] +[sig-apps] Daemon set [Serial] +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should run and stop complex daemon [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Probing container + should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:17:18.556: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename container-probe +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-probe-8338 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51 +[It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating pod liveness-http in namespace container-probe-8338 +Jun 6 13:17:20.745: INFO: Started pod liveness-http in namespace container-probe-8338 +STEP: checking the pod's current state and verifying that restartCount is present +Jun 6 13:17:20.747: INFO: Initial restart count of pod liveness-http is 0 +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:21:21.289: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-8338" for this suite. 
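This probe test is the inverse of the earlier liveness-exec case: the /healthz endpoint keeps answering, so the suite waits roughly four minutes and asserts restartCount is still 0. A minimal sketch, with image, port, and probe path as illustrative assumptions (plain nginx serves / rather than /healthz):

```bash
# Sketch: an HTTP liveness probe that keeps succeeding, so the container is
# never restarted. Image, port, and probe path are illustrative assumptions.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: liveness-http-demo
spec:
  containers:
  - name: web
    image: docker.io/library/nginx:1.14-alpine
    ports:
    - containerPort: 80
    livenessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 10
EOF
# After letting it run for a few minutes, the restart count should still read 0:
kubectl get pod liveness-http-demo -o jsonpath='{.status.containerStatuses[0].restartCount}'
```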
+Jun 6 13:21:27.302: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:21:27.367: INFO: namespace container-probe-8338 deletion completed in 6.073420707s + +• [SLOW TEST:248.811 seconds] +[k8s.io] Probing container +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Service endpoints latency + should not be very high [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-network] Service endpoints latency + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:21:27.368: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename svc-latency +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in svc-latency-2928 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not be very high [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating replication controller svc-latency-rc in namespace svc-latency-2928 +I0606 13:21:27.494485 14 runners.go:184] Created replication controller with name: svc-latency-rc, namespace: svc-latency-2928, replica count: 1 +I0606 13:21:28.544975 14 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0606 13:21:29.545216 14 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Jun 6 13:21:29.661: INFO: Created: latency-svc-k7md9 +Jun 6 13:21:29.664: INFO: Got endpoints: latency-svc-k7md9 [19.289842ms] +Jun 6 13:21:29.676: INFO: Created: latency-svc-6mnpp +Jun 6 13:21:29.681: INFO: Created: latency-svc-6wkjv +Jun 6 13:21:29.685: INFO: Created: latency-svc-gggrn +Jun 6 13:21:29.690: INFO: Created: latency-svc-652d5 +Jun 6 13:21:29.697: INFO: Created: latency-svc-qpv7z +Jun 6 13:21:29.701: INFO: Created: latency-svc-fnmv7 +Jun 6 13:21:29.705: INFO: Created: latency-svc-c9dmk +Jun 6 13:21:29.708: INFO: Created: latency-svc-46glx +Jun 6 13:21:29.730: INFO: Got endpoints: latency-svc-652d5 [64.537508ms] +Jun 6 13:21:29.731: INFO: Got endpoints: latency-svc-qpv7z [65.890424ms] +Jun 6 13:21:29.745: INFO: Got endpoints: latency-svc-gggrn [78.235561ms] +Jun 6 13:21:29.745: INFO: Got endpoints: latency-svc-6wkjv [80.681252ms] +Jun 6 13:21:29.745: INFO: Got endpoints: latency-svc-6mnpp [80.805438ms] +Jun 6 13:21:29.754: INFO: Created: latency-svc-vfb46 +Jun 6 13:21:29.755: INFO: Got endpoints: latency-svc-c9dmk [90.353107ms] +Jun 6 13:21:29.762: INFO: Got endpoints: latency-svc-46glx [95.563678ms] +Jun 6 13:21:29.772: INFO: Created: latency-svc-v28mt +Jun 6 13:21:29.772: 
INFO: Got endpoints: latency-svc-v28mt [105.794122ms] +Jun 6 13:21:29.772: INFO: Got endpoints: latency-svc-fnmv7 [107.294679ms] +Jun 6 13:21:29.772: INFO: Got endpoints: latency-svc-vfb46 [107.517451ms] +Jun 6 13:21:29.772: INFO: Created: latency-svc-8wwjs +Jun 6 13:21:29.772: INFO: Got endpoints: latency-svc-8wwjs [106.474726ms] +Jun 6 13:21:29.773: INFO: Created: latency-svc-vw4zh +Jun 6 13:21:29.773: INFO: Created: latency-svc-2f6bj +Jun 6 13:21:29.773: INFO: Got endpoints: latency-svc-vw4zh [106.340772ms] +Jun 6 13:21:29.774: INFO: Got endpoints: latency-svc-2f6bj [107.673538ms] +Jun 6 13:21:29.778: INFO: Created: latency-svc-g9klq +Jun 6 13:21:29.778: INFO: Got endpoints: latency-svc-g9klq [111.284985ms] +Jun 6 13:21:29.785: INFO: Created: latency-svc-tdqq5 +Jun 6 13:21:29.789: INFO: Got endpoints: latency-svc-tdqq5 [121.789384ms] +Jun 6 13:21:29.789: INFO: Created: latency-svc-5xjmk +Jun 6 13:21:29.795: INFO: Created: latency-svc-xb4jm +Jun 6 13:21:29.798: INFO: Got endpoints: latency-svc-5xjmk [51.72967ms] +Jun 6 13:21:29.804: INFO: Created: latency-svc-98zvl +Jun 6 13:21:29.807: INFO: Got endpoints: latency-svc-xb4jm [60.410853ms] +Jun 6 13:21:29.810: INFO: Created: latency-svc-4sqhs +Jun 6 13:21:29.811: INFO: Got endpoints: latency-svc-98zvl [65.13051ms] +Jun 6 13:21:29.829: INFO: Created: latency-svc-6wghj +Jun 6 13:21:29.829: INFO: Created: latency-svc-hb7tr +Jun 6 13:21:29.818: INFO: Got endpoints: latency-svc-4sqhs [71.092505ms] +Jun 6 13:21:29.829: INFO: Got endpoints: latency-svc-6wghj [75.853215ms] +Jun 6 13:21:29.834: INFO: Got endpoints: latency-svc-hb7tr [78.570184ms] +Jun 6 13:21:29.838: INFO: Created: latency-svc-s47gr +Jun 6 13:21:29.840: INFO: Got endpoints: latency-svc-s47gr [78.915805ms] +Jun 6 13:21:29.850: INFO: Created: latency-svc-mwh6d +Jun 6 13:21:29.854: INFO: Got endpoints: latency-svc-mwh6d [82.675376ms] +Jun 6 13:21:29.855: INFO: Created: latency-svc-x7chw +Jun 6 13:21:29.855: INFO: Got endpoints: latency-svc-x7chw [83.111965ms] +Jun 6 13:21:29.860: INFO: Created: latency-svc-qqbgp +Jun 6 13:21:29.865: INFO: Got endpoints: latency-svc-qqbgp [92.235922ms] +Jun 6 13:21:29.870: INFO: Created: latency-svc-fcbms +Jun 6 13:21:29.874: INFO: Got endpoints: latency-svc-fcbms [102.158074ms] +Jun 6 13:21:29.886: INFO: Created: latency-svc-v7m8d +Jun 6 13:21:29.886: INFO: Got endpoints: latency-svc-v7m8d [112.817837ms] +Jun 6 13:21:29.894: INFO: Created: latency-svc-5wcr7 +Jun 6 13:21:29.898: INFO: Got endpoints: latency-svc-5wcr7 [124.685379ms] +Jun 6 13:21:29.899: INFO: Created: latency-svc-8wxv7 +Jun 6 13:21:29.902: INFO: Got endpoints: latency-svc-8wxv7 [123.303611ms] +Jun 6 13:21:29.904: INFO: Created: latency-svc-2cjrn +Jun 6 13:21:29.910: INFO: Got endpoints: latency-svc-2cjrn [121.068027ms] +Jun 6 13:21:29.911: INFO: Created: latency-svc-p8zg9 +Jun 6 13:21:29.917: INFO: Created: latency-svc-4cmsn +Jun 6 13:21:29.919: INFO: Got endpoints: latency-svc-p8zg9 [121.734558ms] +Jun 6 13:21:29.926: INFO: Got endpoints: latency-svc-4cmsn [119.356983ms] +Jun 6 13:21:29.934: INFO: Created: latency-svc-fvmbm +Jun 6 13:21:29.936: INFO: Got endpoints: latency-svc-fvmbm [123.438525ms] +Jun 6 13:21:29.936: INFO: Created: latency-svc-z8bhf +Jun 6 13:21:29.941: INFO: Created: latency-svc-sb778 +Jun 6 13:21:29.944: INFO: Got endpoints: latency-svc-z8bhf [114.601864ms] +Jun 6 13:21:29.949: INFO: Created: latency-svc-2zjv4 +Jun 6 13:21:29.952: INFO: Created: latency-svc-s89dd +Jun 6 13:21:29.959: INFO: Created: latency-svc-28vpf +Jun 6 13:21:29.966: INFO: Created: 
latency-svc-m5h27 +Jun 6 13:21:29.970: INFO: Created: latency-svc-w9psd +Jun 6 13:21:29.978: INFO: Created: latency-svc-b4xkj +Jun 6 13:21:29.978: INFO: Got endpoints: latency-svc-sb778 [148.413069ms] +Jun 6 13:21:29.983: INFO: Created: latency-svc-ldxc9 +Jun 6 13:21:29.987: INFO: Created: latency-svc-h7r79 +Jun 6 13:21:29.994: INFO: Created: latency-svc-lpb69 +Jun 6 13:21:29.998: INFO: Created: latency-svc-kvml7 +Jun 6 13:21:30.004: INFO: Created: latency-svc-s7zvw +Jun 6 13:21:30.009: INFO: Created: latency-svc-4qb6w +Jun 6 13:21:30.017: INFO: Created: latency-svc-q95rb +Jun 6 13:21:30.030: INFO: Got endpoints: latency-svc-2zjv4 [196.314706ms] +Jun 6 13:21:30.044: INFO: Created: latency-svc-56grj +Jun 6 13:21:30.044: INFO: Created: latency-svc-zp5cw +Jun 6 13:21:30.054: INFO: Created: latency-svc-5qhh9 +Jun 6 13:21:30.082: INFO: Got endpoints: latency-svc-s89dd [241.022358ms] +Jun 6 13:21:30.094: INFO: Created: latency-svc-4ntgv +Jun 6 13:21:30.123: INFO: Got endpoints: latency-svc-28vpf [268.471898ms] +Jun 6 13:21:30.130: INFO: Created: latency-svc-2qdsw +Jun 6 13:21:30.173: INFO: Got endpoints: latency-svc-m5h27 [317.711962ms] +Jun 6 13:21:30.181: INFO: Created: latency-svc-hln7d +Jun 6 13:21:30.222: INFO: Got endpoints: latency-svc-w9psd [350.748817ms] +Jun 6 13:21:30.230: INFO: Created: latency-svc-sxvdp +Jun 6 13:21:30.271: INFO: Got endpoints: latency-svc-b4xkj [394.472827ms] +Jun 6 13:21:30.279: INFO: Created: latency-svc-9zwc4 +Jun 6 13:21:30.321: INFO: Got endpoints: latency-svc-ldxc9 [430.988184ms] +Jun 6 13:21:30.337: INFO: Created: latency-svc-g6jzn +Jun 6 13:21:30.372: INFO: Got endpoints: latency-svc-h7r79 [473.897778ms] +Jun 6 13:21:30.380: INFO: Created: latency-svc-z46bm +Jun 6 13:21:30.424: INFO: Got endpoints: latency-svc-lpb69 [522.281535ms] +Jun 6 13:21:30.438: INFO: Created: latency-svc-q8l6s +Jun 6 13:21:30.471: INFO: Got endpoints: latency-svc-kvml7 [561.229991ms] +Jun 6 13:21:30.479: INFO: Created: latency-svc-n8nn2 +Jun 6 13:21:30.522: INFO: Got endpoints: latency-svc-s7zvw [596.371734ms] +Jun 6 13:21:30.531: INFO: Created: latency-svc-grdsv +Jun 6 13:21:30.572: INFO: Got endpoints: latency-svc-4qb6w [646.084262ms] +Jun 6 13:21:30.580: INFO: Created: latency-svc-6jv57 +Jun 6 13:21:30.621: INFO: Got endpoints: latency-svc-q95rb [685.28461ms] +Jun 6 13:21:30.629: INFO: Created: latency-svc-k68ww +Jun 6 13:21:30.676: INFO: Got endpoints: latency-svc-zp5cw [731.938316ms] +Jun 6 13:21:30.686: INFO: Created: latency-svc-b2djh +Jun 6 13:21:30.721: INFO: Got endpoints: latency-svc-56grj [742.894622ms] +Jun 6 13:21:30.729: INFO: Created: latency-svc-4x9wl +Jun 6 13:21:30.773: INFO: Got endpoints: latency-svc-5qhh9 [742.404248ms] +Jun 6 13:21:30.781: INFO: Created: latency-svc-992vn +Jun 6 13:21:30.820: INFO: Got endpoints: latency-svc-4ntgv [738.314569ms] +Jun 6 13:21:30.828: INFO: Created: latency-svc-d9d4n +Jun 6 13:21:30.871: INFO: Got endpoints: latency-svc-2qdsw [747.542167ms] +Jun 6 13:21:30.881: INFO: Created: latency-svc-6lm9c +Jun 6 13:21:30.921: INFO: Got endpoints: latency-svc-hln7d [748.370006ms] +Jun 6 13:21:30.929: INFO: Created: latency-svc-xs69m +Jun 6 13:21:30.973: INFO: Got endpoints: latency-svc-sxvdp [750.622756ms] +Jun 6 13:21:30.979: INFO: Created: latency-svc-b9fq5 +Jun 6 13:21:31.021: INFO: Got endpoints: latency-svc-9zwc4 [750.191227ms] +Jun 6 13:21:31.030: INFO: Created: latency-svc-xcc29 +Jun 6 13:21:31.078: INFO: Got endpoints: latency-svc-g6jzn [756.012796ms] +Jun 6 13:21:31.089: INFO: Created: latency-svc-p6fr7 +Jun 6 13:21:31.122: INFO: Got 
endpoints: latency-svc-z46bm [750.246699ms] +Jun 6 13:21:31.141: INFO: Created: latency-svc-799kx +Jun 6 13:21:31.171: INFO: Got endpoints: latency-svc-q8l6s [746.326439ms] +Jun 6 13:21:31.179: INFO: Created: latency-svc-nlx8t +Jun 6 13:21:31.221: INFO: Got endpoints: latency-svc-n8nn2 [749.797373ms] +Jun 6 13:21:31.229: INFO: Created: latency-svc-mmn4v +Jun 6 13:21:31.272: INFO: Got endpoints: latency-svc-grdsv [749.505499ms] +Jun 6 13:21:31.279: INFO: Created: latency-svc-gzzhr +Jun 6 13:21:31.321: INFO: Got endpoints: latency-svc-6jv57 [749.085377ms] +Jun 6 13:21:31.386: INFO: Created: latency-svc-wgdx2 +Jun 6 13:21:31.386: INFO: Got endpoints: latency-svc-k68ww [764.214859ms] +Jun 6 13:21:31.405: INFO: Created: latency-svc-7ffvr +Jun 6 13:21:31.423: INFO: Got endpoints: latency-svc-b2djh [747.37698ms] +Jun 6 13:21:31.442: INFO: Created: latency-svc-5qdqq +Jun 6 13:21:31.478: INFO: Got endpoints: latency-svc-4x9wl [756.302915ms] +Jun 6 13:21:31.493: INFO: Created: latency-svc-k9tvr +Jun 6 13:21:31.521: INFO: Got endpoints: latency-svc-992vn [747.964953ms] +Jun 6 13:21:31.537: INFO: Created: latency-svc-jz2bv +Jun 6 13:21:31.572: INFO: Got endpoints: latency-svc-d9d4n [751.432468ms] +Jun 6 13:21:31.580: INFO: Created: latency-svc-h2vpq +Jun 6 13:21:31.621: INFO: Got endpoints: latency-svc-6lm9c [749.655981ms] +Jun 6 13:21:31.630: INFO: Created: latency-svc-k5kkk +Jun 6 13:21:31.672: INFO: Got endpoints: latency-svc-xs69m [751.082661ms] +Jun 6 13:21:31.681: INFO: Created: latency-svc-9lll4 +Jun 6 13:21:31.727: INFO: Got endpoints: latency-svc-b9fq5 [753.409687ms] +Jun 6 13:21:31.734: INFO: Created: latency-svc-6xdbp +Jun 6 13:21:31.772: INFO: Got endpoints: latency-svc-xcc29 [750.081962ms] +Jun 6 13:21:31.792: INFO: Created: latency-svc-p8jfv +Jun 6 13:21:31.822: INFO: Got endpoints: latency-svc-p6fr7 [744.140308ms] +Jun 6 13:21:31.832: INFO: Created: latency-svc-sgcxl +Jun 6 13:21:31.872: INFO: Got endpoints: latency-svc-799kx [749.31029ms] +Jun 6 13:21:31.881: INFO: Created: latency-svc-mfrt8 +Jun 6 13:21:31.923: INFO: Got endpoints: latency-svc-nlx8t [751.088308ms] +Jun 6 13:21:31.930: INFO: Created: latency-svc-ps9vv +Jun 6 13:21:31.971: INFO: Got endpoints: latency-svc-mmn4v [749.459176ms] +Jun 6 13:21:31.979: INFO: Created: latency-svc-ssh4n +Jun 6 13:21:32.028: INFO: Got endpoints: latency-svc-gzzhr [755.573634ms] +Jun 6 13:21:32.039: INFO: Created: latency-svc-f4qvg +Jun 6 13:21:32.073: INFO: Got endpoints: latency-svc-wgdx2 [750.884577ms] +Jun 6 13:21:32.080: INFO: Created: latency-svc-vbrx2 +Jun 6 13:21:32.122: INFO: Got endpoints: latency-svc-7ffvr [736.280252ms] +Jun 6 13:21:32.130: INFO: Created: latency-svc-pgblf +Jun 6 13:21:32.172: INFO: Got endpoints: latency-svc-5qdqq [749.307082ms] +Jun 6 13:21:32.179: INFO: Created: latency-svc-v9ppz +Jun 6 13:21:32.221: INFO: Got endpoints: latency-svc-k9tvr [742.445022ms] +Jun 6 13:21:32.229: INFO: Created: latency-svc-25jzt +Jun 6 13:21:32.271: INFO: Got endpoints: latency-svc-jz2bv [750.494508ms] +Jun 6 13:21:32.279: INFO: Created: latency-svc-kghhz +Jun 6 13:21:32.322: INFO: Got endpoints: latency-svc-h2vpq [750.037428ms] +Jun 6 13:21:32.330: INFO: Created: latency-svc-pc9cn +Jun 6 13:21:32.372: INFO: Got endpoints: latency-svc-k5kkk [750.12622ms] +Jun 6 13:21:32.379: INFO: Created: latency-svc-g9sck +Jun 6 13:21:32.422: INFO: Got endpoints: latency-svc-9lll4 [749.841955ms] +Jun 6 13:21:32.454: INFO: Created: latency-svc-fh7hh +Jun 6 13:21:32.482: INFO: Got endpoints: latency-svc-6xdbp [755.517267ms] +Jun 6 13:21:32.542: INFO: 
Created: latency-svc-v59nr +Jun 6 13:21:32.542: INFO: Got endpoints: latency-svc-p8jfv [770.502559ms] +Jun 6 13:21:32.560: INFO: Created: latency-svc-6g88r +Jun 6 13:21:32.572: INFO: Got endpoints: latency-svc-sgcxl [749.439607ms] +Jun 6 13:21:32.579: INFO: Created: latency-svc-9p2hl +Jun 6 13:21:32.621: INFO: Got endpoints: latency-svc-mfrt8 [749.509815ms] +Jun 6 13:21:32.628: INFO: Created: latency-svc-w8frs +Jun 6 13:21:32.672: INFO: Got endpoints: latency-svc-ps9vv [749.422789ms] +Jun 6 13:21:32.679: INFO: Created: latency-svc-8jhzf +Jun 6 13:21:32.722: INFO: Got endpoints: latency-svc-ssh4n [751.556623ms] +Jun 6 13:21:32.730: INFO: Created: latency-svc-4lwlt +Jun 6 13:21:32.771: INFO: Got endpoints: latency-svc-f4qvg [743.645356ms] +Jun 6 13:21:32.779: INFO: Created: latency-svc-x5lpj +Jun 6 13:21:32.821: INFO: Got endpoints: latency-svc-vbrx2 [748.113241ms] +Jun 6 13:21:32.830: INFO: Created: latency-svc-pd4xv +Jun 6 13:21:32.872: INFO: Got endpoints: latency-svc-pgblf [749.346526ms] +Jun 6 13:21:32.881: INFO: Created: latency-svc-gmpt6 +Jun 6 13:21:32.921: INFO: Got endpoints: latency-svc-v9ppz [748.876118ms] +Jun 6 13:21:32.929: INFO: Created: latency-svc-9g7cm +Jun 6 13:21:32.972: INFO: Got endpoints: latency-svc-25jzt [751.19443ms] +Jun 6 13:21:32.980: INFO: Created: latency-svc-vhpd8 +Jun 6 13:21:33.021: INFO: Got endpoints: latency-svc-kghhz [749.353395ms] +Jun 6 13:21:33.029: INFO: Created: latency-svc-d4lhr +Jun 6 13:21:33.071: INFO: Got endpoints: latency-svc-pc9cn [748.909967ms] +Jun 6 13:21:33.078: INFO: Created: latency-svc-w8pvw +Jun 6 13:21:33.128: INFO: Got endpoints: latency-svc-g9sck [756.319337ms] +Jun 6 13:21:33.135: INFO: Created: latency-svc-btcd9 +Jun 6 13:21:33.172: INFO: Got endpoints: latency-svc-fh7hh [749.451605ms] +Jun 6 13:21:33.179: INFO: Created: latency-svc-z2s2n +Jun 6 13:21:33.221: INFO: Got endpoints: latency-svc-v59nr [739.247241ms] +Jun 6 13:21:33.229: INFO: Created: latency-svc-hh8mm +Jun 6 13:21:33.271: INFO: Got endpoints: latency-svc-6g88r [729.112278ms] +Jun 6 13:21:33.293: INFO: Created: latency-svc-kgcns +Jun 6 13:21:33.322: INFO: Got endpoints: latency-svc-9p2hl [749.836979ms] +Jun 6 13:21:33.329: INFO: Created: latency-svc-mm22j +Jun 6 13:21:33.372: INFO: Got endpoints: latency-svc-w8frs [750.452464ms] +Jun 6 13:21:33.379: INFO: Created: latency-svc-w5tcp +Jun 6 13:21:33.422: INFO: Got endpoints: latency-svc-8jhzf [749.951334ms] +Jun 6 13:21:33.430: INFO: Created: latency-svc-x4sv4 +Jun 6 13:21:33.471: INFO: Got endpoints: latency-svc-4lwlt [748.76026ms] +Jun 6 13:21:33.479: INFO: Created: latency-svc-chnm5 +Jun 6 13:21:33.521: INFO: Got endpoints: latency-svc-x5lpj [749.074996ms] +Jun 6 13:21:33.528: INFO: Created: latency-svc-ppc6v +Jun 6 13:21:33.571: INFO: Got endpoints: latency-svc-pd4xv [750.698036ms] +Jun 6 13:21:33.579: INFO: Created: latency-svc-g58m4 +Jun 6 13:21:33.621: INFO: Got endpoints: latency-svc-gmpt6 [749.600835ms] +Jun 6 13:21:33.628: INFO: Created: latency-svc-c8qwk +Jun 6 13:21:33.671: INFO: Got endpoints: latency-svc-9g7cm [748.440024ms] +Jun 6 13:21:33.679: INFO: Created: latency-svc-qtnl5 +Jun 6 13:21:33.721: INFO: Got endpoints: latency-svc-vhpd8 [748.709203ms] +Jun 6 13:21:33.728: INFO: Created: latency-svc-zw4vb +Jun 6 13:21:33.771: INFO: Got endpoints: latency-svc-d4lhr [750.369757ms] +Jun 6 13:21:33.778: INFO: Created: latency-svc-nzkkf +Jun 6 13:21:33.821: INFO: Got endpoints: latency-svc-w8pvw [749.520848ms] +Jun 6 13:21:33.829: INFO: Created: latency-svc-dhbmv +Jun 6 13:21:33.871: INFO: Got endpoints: 
latency-svc-btcd9 [743.142786ms] +Jun 6 13:21:33.878: INFO: Created: latency-svc-64vsz +Jun 6 13:21:33.920: INFO: Got endpoints: latency-svc-z2s2n [748.416474ms] +Jun 6 13:21:33.929: INFO: Created: latency-svc-th75q +Jun 6 13:21:33.972: INFO: Got endpoints: latency-svc-hh8mm [750.2509ms] +Jun 6 13:21:33.979: INFO: Created: latency-svc-zw2br +Jun 6 13:21:34.033: INFO: Got endpoints: latency-svc-kgcns [761.352046ms] +Jun 6 13:21:34.046: INFO: Created: latency-svc-rls8n +Jun 6 13:21:34.071: INFO: Got endpoints: latency-svc-mm22j [749.220235ms] +Jun 6 13:21:34.079: INFO: Created: latency-svc-655lh +Jun 6 13:21:34.122: INFO: Got endpoints: latency-svc-w5tcp [749.53281ms] +Jun 6 13:21:34.133: INFO: Created: latency-svc-7ftls +Jun 6 13:21:34.172: INFO: Got endpoints: latency-svc-x4sv4 [749.73861ms] +Jun 6 13:21:34.179: INFO: Created: latency-svc-chksq +Jun 6 13:21:34.221: INFO: Got endpoints: latency-svc-chnm5 [749.848262ms] +Jun 6 13:21:34.229: INFO: Created: latency-svc-scph4 +Jun 6 13:21:34.271: INFO: Got endpoints: latency-svc-ppc6v [750.654142ms] +Jun 6 13:21:34.280: INFO: Created: latency-svc-p666h +Jun 6 13:21:34.321: INFO: Got endpoints: latency-svc-g58m4 [749.292738ms] +Jun 6 13:21:34.329: INFO: Created: latency-svc-7m82v +Jun 6 13:21:34.371: INFO: Got endpoints: latency-svc-c8qwk [749.860191ms] +Jun 6 13:21:34.379: INFO: Created: latency-svc-z8ksq +Jun 6 13:21:34.422: INFO: Got endpoints: latency-svc-qtnl5 [750.974244ms] +Jun 6 13:21:34.429: INFO: Created: latency-svc-7gn6k +Jun 6 13:21:34.471: INFO: Got endpoints: latency-svc-zw4vb [750.734662ms] +Jun 6 13:21:34.479: INFO: Created: latency-svc-j2l9g +Jun 6 13:21:34.521: INFO: Got endpoints: latency-svc-nzkkf [750.198171ms] +Jun 6 13:21:34.529: INFO: Created: latency-svc-bxfn5 +Jun 6 13:21:34.570: INFO: Got endpoints: latency-svc-dhbmv [749.738395ms] +Jun 6 13:21:34.578: INFO: Created: latency-svc-4dlsr +Jun 6 13:21:34.621: INFO: Got endpoints: latency-svc-64vsz [749.630672ms] +Jun 6 13:21:34.629: INFO: Created: latency-svc-7q7mx +Jun 6 13:21:34.672: INFO: Got endpoints: latency-svc-th75q [749.199316ms] +Jun 6 13:21:34.679: INFO: Created: latency-svc-h5ngd +Jun 6 13:21:34.722: INFO: Got endpoints: latency-svc-zw2br [750.175662ms] +Jun 6 13:21:34.730: INFO: Created: latency-svc-fsrvv +Jun 6 13:21:34.772: INFO: Got endpoints: latency-svc-rls8n [739.247215ms] +Jun 6 13:21:34.780: INFO: Created: latency-svc-m2m2m +Jun 6 13:21:34.822: INFO: Got endpoints: latency-svc-655lh [750.653903ms] +Jun 6 13:21:34.829: INFO: Created: latency-svc-whmwt +Jun 6 13:21:34.872: INFO: Got endpoints: latency-svc-7ftls [749.629747ms] +Jun 6 13:21:34.880: INFO: Created: latency-svc-qllxf +Jun 6 13:21:34.921: INFO: Got endpoints: latency-svc-chksq [749.683922ms] +Jun 6 13:21:34.945: INFO: Created: latency-svc-5hccv +Jun 6 13:21:34.982: INFO: Got endpoints: latency-svc-scph4 [760.814198ms] +Jun 6 13:21:34.991: INFO: Created: latency-svc-ltcsv +Jun 6 13:21:35.022: INFO: Got endpoints: latency-svc-p666h [751.041961ms] +Jun 6 13:21:35.031: INFO: Created: latency-svc-kzxbp +Jun 6 13:21:35.075: INFO: Got endpoints: latency-svc-7m82v [753.685774ms] +Jun 6 13:21:35.082: INFO: Created: latency-svc-mt2qx +Jun 6 13:21:35.125: INFO: Got endpoints: latency-svc-z8ksq [753.151344ms] +Jun 6 13:21:35.132: INFO: Created: latency-svc-fmvfs +Jun 6 13:21:35.173: INFO: Got endpoints: latency-svc-7gn6k [750.815262ms] +Jun 6 13:21:35.181: INFO: Created: latency-svc-n9cj7 +Jun 6 13:21:35.229: INFO: Got endpoints: latency-svc-j2l9g [757.074512ms] +Jun 6 13:21:35.237: INFO: Created: 
latency-svc-dwfs8 +Jun 6 13:21:35.271: INFO: Got endpoints: latency-svc-bxfn5 [749.754687ms] +Jun 6 13:21:35.281: INFO: Created: latency-svc-nthql +Jun 6 13:21:35.322: INFO: Got endpoints: latency-svc-4dlsr [751.254438ms] +Jun 6 13:21:35.329: INFO: Created: latency-svc-nhz52 +Jun 6 13:21:35.372: INFO: Got endpoints: latency-svc-7q7mx [751.257877ms] +Jun 6 13:21:35.382: INFO: Created: latency-svc-nn7lh +Jun 6 13:21:35.422: INFO: Got endpoints: latency-svc-h5ngd [750.542271ms] +Jun 6 13:21:35.430: INFO: Created: latency-svc-6jvw2 +Jun 6 13:21:35.472: INFO: Got endpoints: latency-svc-fsrvv [749.647721ms] +Jun 6 13:21:35.479: INFO: Created: latency-svc-ssj4g +Jun 6 13:21:35.521: INFO: Got endpoints: latency-svc-m2m2m [748.51563ms] +Jun 6 13:21:35.528: INFO: Created: latency-svc-mnj9k +Jun 6 13:21:35.572: INFO: Got endpoints: latency-svc-whmwt [750.26878ms] +Jun 6 13:21:35.579: INFO: Created: latency-svc-hxqrg +Jun 6 13:21:35.621: INFO: Got endpoints: latency-svc-qllxf [748.322586ms] +Jun 6 13:21:35.629: INFO: Created: latency-svc-vxx68 +Jun 6 13:21:35.671: INFO: Got endpoints: latency-svc-5hccv [748.915422ms] +Jun 6 13:21:35.679: INFO: Created: latency-svc-ndztl +Jun 6 13:21:35.722: INFO: Got endpoints: latency-svc-ltcsv [739.712477ms] +Jun 6 13:21:35.729: INFO: Created: latency-svc-74hsj +Jun 6 13:21:35.771: INFO: Got endpoints: latency-svc-kzxbp [748.955113ms] +Jun 6 13:21:35.779: INFO: Created: latency-svc-ml6hm +Jun 6 13:21:35.821: INFO: Got endpoints: latency-svc-mt2qx [746.446339ms] +Jun 6 13:21:35.830: INFO: Created: latency-svc-9ggtx +Jun 6 13:21:35.871: INFO: Got endpoints: latency-svc-fmvfs [746.526652ms] +Jun 6 13:21:35.879: INFO: Created: latency-svc-s25rh +Jun 6 13:21:35.922: INFO: Got endpoints: latency-svc-n9cj7 [748.793921ms] +Jun 6 13:21:35.931: INFO: Created: latency-svc-kj7vf +Jun 6 13:21:35.972: INFO: Got endpoints: latency-svc-dwfs8 [743.030939ms] +Jun 6 13:21:35.979: INFO: Created: latency-svc-cbss4 +Jun 6 13:21:36.021: INFO: Got endpoints: latency-svc-nthql [749.810832ms] +Jun 6 13:21:36.034: INFO: Created: latency-svc-qpjwb +Jun 6 13:21:36.071: INFO: Got endpoints: latency-svc-nhz52 [749.286909ms] +Jun 6 13:21:36.078: INFO: Created: latency-svc-kkrpd +Jun 6 13:21:36.122: INFO: Got endpoints: latency-svc-nn7lh [749.340458ms] +Jun 6 13:21:36.134: INFO: Created: latency-svc-vb5zp +Jun 6 13:21:36.171: INFO: Got endpoints: latency-svc-6jvw2 [749.219971ms] +Jun 6 13:21:36.179: INFO: Created: latency-svc-zrhdh +Jun 6 13:21:36.222: INFO: Got endpoints: latency-svc-ssj4g [750.361279ms] +Jun 6 13:21:36.229: INFO: Created: latency-svc-kkdkt +Jun 6 13:21:36.272: INFO: Got endpoints: latency-svc-mnj9k [750.813591ms] +Jun 6 13:21:36.279: INFO: Created: latency-svc-spq48 +Jun 6 13:21:36.322: INFO: Got endpoints: latency-svc-hxqrg [749.770503ms] +Jun 6 13:21:36.329: INFO: Created: latency-svc-zbwlb +Jun 6 13:21:36.372: INFO: Got endpoints: latency-svc-vxx68 [750.239086ms] +Jun 6 13:21:36.379: INFO: Created: latency-svc-pcdvg +Jun 6 13:21:36.422: INFO: Got endpoints: latency-svc-ndztl [751.129216ms] +Jun 6 13:21:36.430: INFO: Created: latency-svc-ksqd9 +Jun 6 13:21:36.473: INFO: Got endpoints: latency-svc-74hsj [750.802601ms] +Jun 6 13:21:36.480: INFO: Created: latency-svc-bzfzc +Jun 6 13:21:36.522: INFO: Got endpoints: latency-svc-ml6hm [750.368042ms] +Jun 6 13:21:36.532: INFO: Created: latency-svc-g4ntr +Jun 6 13:21:36.571: INFO: Got endpoints: latency-svc-9ggtx [749.675107ms] +Jun 6 13:21:36.580: INFO: Created: latency-svc-kx5xw +Jun 6 13:21:36.622: INFO: Got endpoints: 
latency-svc-s25rh [750.051732ms] +Jun 6 13:21:36.639: INFO: Created: latency-svc-rxlbr +Jun 6 13:21:36.671: INFO: Got endpoints: latency-svc-kj7vf [749.090975ms] +Jun 6 13:21:36.679: INFO: Created: latency-svc-h9cq7 +Jun 6 13:21:36.721: INFO: Got endpoints: latency-svc-cbss4 [749.155296ms] +Jun 6 13:21:36.729: INFO: Created: latency-svc-785xh +Jun 6 13:21:36.772: INFO: Got endpoints: latency-svc-qpjwb [750.705848ms] +Jun 6 13:21:36.779: INFO: Created: latency-svc-8krrk +Jun 6 13:21:36.821: INFO: Got endpoints: latency-svc-kkrpd [749.288758ms] +Jun 6 13:21:36.830: INFO: Created: latency-svc-kdmlw +Jun 6 13:21:36.872: INFO: Got endpoints: latency-svc-vb5zp [749.991365ms] +Jun 6 13:21:36.879: INFO: Created: latency-svc-qxphn +Jun 6 13:21:36.921: INFO: Got endpoints: latency-svc-zrhdh [749.958616ms] +Jun 6 13:21:36.929: INFO: Created: latency-svc-8xpnr +Jun 6 13:21:36.972: INFO: Got endpoints: latency-svc-kkdkt [750.158679ms] +Jun 6 13:21:36.980: INFO: Created: latency-svc-cs8z4 +Jun 6 13:21:37.022: INFO: Got endpoints: latency-svc-spq48 [750.330049ms] +Jun 6 13:21:37.030: INFO: Created: latency-svc-7wv9j +Jun 6 13:21:37.071: INFO: Got endpoints: latency-svc-zbwlb [749.203665ms] +Jun 6 13:21:37.080: INFO: Created: latency-svc-vkpmd +Jun 6 13:21:37.122: INFO: Got endpoints: latency-svc-pcdvg [750.634225ms] +Jun 6 13:21:37.129: INFO: Created: latency-svc-krctc +Jun 6 13:21:37.172: INFO: Got endpoints: latency-svc-ksqd9 [749.066853ms] +Jun 6 13:21:37.179: INFO: Created: latency-svc-jwwvx +Jun 6 13:21:37.222: INFO: Got endpoints: latency-svc-bzfzc [749.307757ms] +Jun 6 13:21:37.230: INFO: Created: latency-svc-p9sxk +Jun 6 13:21:37.272: INFO: Got endpoints: latency-svc-g4ntr [750.553901ms] +Jun 6 13:21:37.280: INFO: Created: latency-svc-p62ff +Jun 6 13:21:37.321: INFO: Got endpoints: latency-svc-kx5xw [749.933175ms] +Jun 6 13:21:37.329: INFO: Created: latency-svc-chvbj +Jun 6 13:21:37.372: INFO: Got endpoints: latency-svc-rxlbr [750.066406ms] +Jun 6 13:21:37.381: INFO: Created: latency-svc-6drzh +Jun 6 13:21:37.422: INFO: Got endpoints: latency-svc-h9cq7 [750.737366ms] +Jun 6 13:21:37.430: INFO: Created: latency-svc-sfx97 +Jun 6 13:21:37.471: INFO: Got endpoints: latency-svc-785xh [749.706013ms] +Jun 6 13:21:37.479: INFO: Created: latency-svc-hnnw9 +Jun 6 13:21:37.522: INFO: Got endpoints: latency-svc-8krrk [750.007758ms] +Jun 6 13:21:37.572: INFO: Got endpoints: latency-svc-kdmlw [750.295339ms] +Jun 6 13:21:37.622: INFO: Got endpoints: latency-svc-qxphn [750.512906ms] +Jun 6 13:21:37.671: INFO: Got endpoints: latency-svc-8xpnr [749.713284ms] +Jun 6 13:21:37.722: INFO: Got endpoints: latency-svc-cs8z4 [749.590218ms] +Jun 6 13:21:37.773: INFO: Got endpoints: latency-svc-7wv9j [751.289431ms] +Jun 6 13:21:37.822: INFO: Got endpoints: latency-svc-vkpmd [750.953858ms] +Jun 6 13:21:37.872: INFO: Got endpoints: latency-svc-krctc [749.496544ms] +Jun 6 13:21:37.921: INFO: Got endpoints: latency-svc-jwwvx [749.443859ms] +Jun 6 13:21:37.972: INFO: Got endpoints: latency-svc-p9sxk [750.223524ms] +Jun 6 13:21:38.027: INFO: Got endpoints: latency-svc-p62ff [754.304458ms] +Jun 6 13:21:38.072: INFO: Got endpoints: latency-svc-chvbj [750.591349ms] +Jun 6 13:21:38.121: INFO: Got endpoints: latency-svc-6drzh [749.353845ms] +Jun 6 13:21:38.173: INFO: Got endpoints: latency-svc-sfx97 [750.375741ms] +Jun 6 13:21:38.222: INFO: Got endpoints: latency-svc-hnnw9 [750.834737ms] +Jun 6 13:21:38.222: INFO: Latencies: [51.72967ms 60.410853ms 64.537508ms 65.13051ms 65.890424ms 71.092505ms 75.853215ms 78.235561ms 78.570184ms 
78.915805ms 80.681252ms 80.805438ms 82.675376ms 83.111965ms 90.353107ms 92.235922ms 95.563678ms 102.158074ms 105.794122ms 106.340772ms 106.474726ms 107.294679ms 107.517451ms 107.673538ms 111.284985ms 112.817837ms 114.601864ms 119.356983ms 121.068027ms 121.734558ms 121.789384ms 123.303611ms 123.438525ms 124.685379ms 148.413069ms 196.314706ms 241.022358ms 268.471898ms 317.711962ms 350.748817ms 394.472827ms 430.988184ms 473.897778ms 522.281535ms 561.229991ms 596.371734ms 646.084262ms 685.28461ms 729.112278ms 731.938316ms 736.280252ms 738.314569ms 739.247215ms 739.247241ms 739.712477ms 742.404248ms 742.445022ms 742.894622ms 743.030939ms 743.142786ms 743.645356ms 744.140308ms 746.326439ms 746.446339ms 746.526652ms 747.37698ms 747.542167ms 747.964953ms 748.113241ms 748.322586ms 748.370006ms 748.416474ms 748.440024ms 748.51563ms 748.709203ms 748.76026ms 748.793921ms 748.876118ms 748.909967ms 748.915422ms 748.955113ms 749.066853ms 749.074996ms 749.085377ms 749.090975ms 749.155296ms 749.199316ms 749.203665ms 749.219971ms 749.220235ms 749.286909ms 749.288758ms 749.292738ms 749.307082ms 749.307757ms 749.31029ms 749.340458ms 749.346526ms 749.353395ms 749.353845ms 749.422789ms 749.439607ms 749.443859ms 749.451605ms 749.459176ms 749.496544ms 749.505499ms 749.509815ms 749.520848ms 749.53281ms 749.590218ms 749.600835ms 749.629747ms 749.630672ms 749.647721ms 749.655981ms 749.675107ms 749.683922ms 749.706013ms 749.713284ms 749.738395ms 749.73861ms 749.754687ms 749.770503ms 749.797373ms 749.810832ms 749.836979ms 749.841955ms 749.848262ms 749.860191ms 749.933175ms 749.951334ms 749.958616ms 749.991365ms 750.007758ms 750.037428ms 750.051732ms 750.066406ms 750.081962ms 750.12622ms 750.158679ms 750.175662ms 750.191227ms 750.198171ms 750.223524ms 750.239086ms 750.246699ms 750.2509ms 750.26878ms 750.295339ms 750.330049ms 750.361279ms 750.368042ms 750.369757ms 750.375741ms 750.452464ms 750.494508ms 750.512906ms 750.542271ms 750.553901ms 750.591349ms 750.622756ms 750.634225ms 750.653903ms 750.654142ms 750.698036ms 750.705848ms 750.734662ms 750.737366ms 750.802601ms 750.813591ms 750.815262ms 750.834737ms 750.884577ms 750.953858ms 750.974244ms 751.041961ms 751.082661ms 751.088308ms 751.129216ms 751.19443ms 751.254438ms 751.257877ms 751.289431ms 751.432468ms 751.556623ms 753.151344ms 753.409687ms 753.685774ms 754.304458ms 755.517267ms 755.573634ms 756.012796ms 756.302915ms 756.319337ms 757.074512ms 760.814198ms 761.352046ms 764.214859ms 770.502559ms] +Jun 6 13:21:38.222: INFO: 50 %ile: 749.422789ms +Jun 6 13:21:38.222: INFO: 90 %ile: 751.19443ms +Jun 6 13:21:38.222: INFO: 99 %ile: 764.214859ms +Jun 6 13:21:38.222: INFO: Total sample count: 200 +[AfterEach] [sig-network] Service endpoints latency + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:21:38.222: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "svc-latency-2928" for this suite. 
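Each "Created"/"Got endpoints" pair above is one latency sample: the time from creating a Service until its Endpoints object is populated, collected 200 times and summarized as the percentiles printed at the end. A rough single-sample version of the same measurement, with all names as illustrative assumptions:

```bash
# Sketch: measure one service-to-endpoints latency sample by hand. The suite
# automates 200 of these; the names here are illustrative assumptions.
kubectl create deployment latency-demo --image=docker.io/library/nginx:1.14-alpine
kubectl rollout status deployment latency-demo
start=$(date +%s%N)
kubectl expose deployment latency-demo --port=80 --name=latency-demo-svc
until [ -n "$(kubectl get endpoints latency-demo-svc -o jsonpath='{.subsets[*].addresses[*].ip}' 2>/dev/null)" ]; do
  sleep 0.1
done
echo "endpoints populated after $(( ($(date +%s%N) - start) / 1000000 )) ms"
```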
+Jun 6 13:21:58.233: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:21:58.349: INFO: namespace svc-latency-2928 deletion completed in 20.123394479s + +• [SLOW TEST:30.982 seconds] +[sig-network] Service endpoints latency +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + should not be very high [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run default + should create an rc or deployment from an image [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:21:58.350: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-475 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Kubectl run default + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1318 +[It] should create an rc or deployment from an image [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 6 13:21:58.475: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --namespace=kubectl-475' +Jun 6 13:21:58.699: INFO: stderr: "kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. 
Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +Jun 6 13:21:58.699: INFO: stdout: "deployment.apps/e2e-test-nginx-deployment created\n" +STEP: verifying the pod controlled by e2e-test-nginx-deployment gets created +[AfterEach] [k8s.io] Kubectl run default + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1324 +Jun 6 13:21:58.708: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete deployment e2e-test-nginx-deployment --namespace=kubectl-475' +Jun 6 13:21:58.788: INFO: stderr: "" +Jun 6 13:21:58.788: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:21:58.788: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-475" for this suite. +Jun 6 13:22:04.803: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:22:04.952: INFO: namespace kubectl-475 deletion completed in 6.157745919s + +• [SLOW TEST:6.603 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl run default + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should create an rc or deployment from an image [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:22:04.953: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-4829 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name projected-configmap-test-volume-13ba645d-885e-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test consume configMaps +Jun 6 13:22:05.087: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-13bac7b1-885e-11e9-b613-8a9bc7c14a19" in namespace "projected-4829" to be "success or failure" +Jun 6 13:22:05.091: INFO: Pod "pod-projected-configmaps-13bac7b1-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3.761692ms +Jun 6 13:22:07.093: INFO: Pod "pod-projected-configmaps-13bac7b1-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006246925s +STEP: Saw pod success +Jun 6 13:22:07.093: INFO: Pod "pod-projected-configmaps-13bac7b1-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:22:07.095: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-configmaps-13bac7b1-885e-11e9-b613-8a9bc7c14a19 container projected-configmap-volume-test: +STEP: delete the pod +Jun 6 13:22:07.109: INFO: Waiting for pod pod-projected-configmaps-13bac7b1-885e-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:22:07.113: INFO: Pod pod-projected-configmaps-13bac7b1-885e-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:22:07.114: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-4829" for this suite. +Jun 6 13:22:13.124: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:22:13.211: INFO: namespace projected-4829 deletion completed in 6.095276848s + +• [SLOW TEST:8.258 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33 + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] ReplicaSet + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] ReplicaSet + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:22:13.212: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename replicaset +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in replicaset-2531 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +Jun 6 13:22:13.335: INFO: Creating ReplicaSet my-hostname-basic-18a622cb-885e-11e9-b613-8a9bc7c14a19 +Jun 6 13:22:13.343: INFO: Pod name my-hostname-basic-18a622cb-885e-11e9-b613-8a9bc7c14a19: Found 0 pods out of 1 +Jun 6 13:22:18.346: INFO: Pod name my-hostname-basic-18a622cb-885e-11e9-b613-8a9bc7c14a19: Found 1 pods out of 1 +Jun 6 13:22:18.346: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-18a622cb-885e-11e9-b613-8a9bc7c14a19" is running +Jun 6 13:22:18.348: INFO: Pod "my-hostname-basic-18a622cb-885e-11e9-b613-8a9bc7c14a19-p4d2c" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 
00:00:00 +0000 UTC LastTransitionTime:2019-06-06 13:22:13 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-06 13:22:16 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-06 13:22:16 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-06 13:22:13 +0000 UTC Reason: Message:}]) +Jun 6 13:22:18.348: INFO: Trying to dial the pod +Jun 6 13:22:23.356: INFO: Controller my-hostname-basic-18a622cb-885e-11e9-b613-8a9bc7c14a19: Got expected result from replica 1 [my-hostname-basic-18a622cb-885e-11e9-b613-8a9bc7c14a19-p4d2c]: "my-hostname-basic-18a622cb-885e-11e9-b613-8a9bc7c14a19-p4d2c", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicaSet + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:22:23.356: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replicaset-2531" for this suite. +Jun 6 13:22:29.367: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:22:29.480: INFO: namespace replicaset-2531 deletion completed in 6.121481327s + +• [SLOW TEST:16.269 seconds] +[sig-apps] ReplicaSet +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl api-versions + should check if v1 is in available api versions [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:22:29.483: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-2776 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[It] should check if v1 is in available api versions [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: validating api versions +Jun 6 13:22:29.640: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 api-versions' +Jun 6 13:22:29.701: INFO: stderr: "" +Jun 6 13:22:29.701: INFO: stdout: 
"admissionregistration.k8s.io/v1beta1\napiextensions.k8s.io/v1beta1\napiregistration.k8s.io/v1\napiregistration.k8s.io/v1beta1\napps/v1\napps/v1beta1\napps/v1beta2\nauthentication.k8s.io/v1\nauthentication.k8s.io/v1beta1\nauthorization.k8s.io/v1\nauthorization.k8s.io/v1beta1\nautoscaling/v1\nautoscaling/v2beta1\nautoscaling/v2beta2\nbatch/v1\nbatch/v1beta1\ncertificates.k8s.io/v1beta1\ncoordination.k8s.io/v1\ncoordination.k8s.io/v1beta1\ncrd.projectcalico.org/v1\nevents.k8s.io/v1beta1\nextensions/v1beta1\nmetrics.k8s.io/v1beta1\nnetworking.k8s.io/v1\nnetworking.k8s.io/v1beta1\nnode.k8s.io/v1beta1\npolicy/v1beta1\nrbac.authorization.k8s.io/v1\nrbac.authorization.k8s.io/v1beta1\nscheduling.k8s.io/v1\nscheduling.k8s.io/v1beta1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:22:29.702: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-2776" for this suite. +Jun 6 13:22:35.711: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:22:35.779: INFO: namespace kubectl-2776 deletion completed in 6.07517139s + +• [SLOW TEST:6.296 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl api-versions + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should check if v1 is in available api versions [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SS +------------------------------ +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:22:35.779: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-lifecycle-hook-5912 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61 +STEP: create the container to handle the HTTPGet hook request. 
+[It] should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: create the pod with lifecycle hook +STEP: check poststart hook +STEP: delete the pod with lifecycle hook +Jun 6 13:22:39.945: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 6 13:22:39.947: INFO: Pod pod-with-poststart-http-hook still exists +Jun 6 13:22:41.947: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 6 13:22:41.949: INFO: Pod pod-with-poststart-http-hook still exists +Jun 6 13:22:43.947: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 6 13:22:43.949: INFO: Pod pod-with-poststart-http-hook still exists +Jun 6 13:22:45.947: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 6 13:22:45.950: INFO: Pod pod-with-poststart-http-hook no longer exists +[AfterEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:22:45.950: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-5912" for this suite. +Jun 6 13:23:07.959: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:23:08.080: INFO: namespace container-lifecycle-hook-5912 deletion completed in 22.127790426s + +• [SLOW TEST:32.300 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + when create a pod with lifecycle hook + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40 + should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSS +------------------------------ +[sig-apps] Deployment + deployment should support rollover [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:23:08.080: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename deployment +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in deployment-8064 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65 +[It] deployment should support rollover [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +Jun 6 13:23:08.260: INFO: Pod name rollover-pod: Found 0 pods out of 1 +Jun 6 13:23:13.265: INFO: Pod name 
rollover-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +Jun 6 13:23:13.265: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready +Jun 6 13:23:15.267: INFO: Creating deployment "test-rollover-deployment" +Jun 6 13:23:15.321: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations +Jun 6 13:23:17.333: INFO: Check revision of new replica set for deployment "test-rollover-deployment" +Jun 6 13:23:17.336: INFO: Ensure that both replica sets have 1 created replica +Jun 6 13:23:17.340: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update +Jun 6 13:23:17.346: INFO: Updating deployment test-rollover-deployment +Jun 6 13:23:17.346: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller +Jun 6 13:23:19.355: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2 +Jun 6 13:23:19.360: INFO: Make sure deployment "test-rollover-deployment" is complete +Jun 6 13:23:19.363: INFO: all replica sets need to contain the pod-template-hash label +Jun 6 13:23:19.363: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424197, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 6 13:23:21.367: INFO: all replica sets need to contain the pod-template-hash label +Jun 6 13:23:21.368: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424200, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 6 13:23:23.420: INFO: all replica sets need to contain the pod-template-hash label +Jun 6 13:23:23.420: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, 
loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424200, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 6 13:23:25.368: INFO: all replica sets need to contain the pod-template-hash label +Jun 6 13:23:25.368: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424200, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 6 13:23:27.368: INFO: all replica sets need to contain the pod-template-hash label +Jun 6 13:23:27.368: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424200, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 6 13:23:29.372: INFO: all replica sets need to contain the pod-template-hash label +Jun 6 13:23:29.372: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424200, loc:(*time.Location)(0x8a140e0)}}, 
LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424195, loc:(*time.Location)(0x8a140e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 6 13:23:31.371: INFO: +Jun 6 13:23:31.371: INFO: Ensure that both old replica sets have no replicas +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59 +Jun 6 13:23:31.376: INFO: Deployment "test-rollover-deployment": +&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment,GenerateName:,Namespace:deployment-8064,SelfLink:/apis/apps/v1/namespaces/deployment-8064/deployments/test-rollover-deployment,UID:3d905ccd-885e-11e9-bdc9-0231d0af67bc,ResourceVersion:12537,Generation:2,CreationTimestamp:2019-06-06 13:23:15 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-06-06 13:23:15 +0000 UTC 2019-06-06 13:23:15 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-06 13:23:30 +0000 UTC 2019-06-06 13:23:15 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rollover-deployment-766b4d6c9d" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},} + +Jun 6 13:23:31.379: INFO: New ReplicaSet "test-rollover-deployment-766b4d6c9d" of Deployment "test-rollover-deployment": 
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-766b4d6c9d,GenerateName:,Namespace:deployment-8064,SelfLink:/apis/apps/v1/namespaces/deployment-8064/replicasets/test-rollover-deployment-766b4d6c9d,UID:3ecd5ce7-885e-11e9-bdc9-0231d0af67bc,ResourceVersion:12527,Generation:2,CreationTimestamp:2019-06-06 13:23:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 766b4d6c9d,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 3d905ccd-885e-11e9-bdc9-0231d0af67bc 0xc002769687 0xc002769688}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 766b4d6c9d,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 766b4d6c9d,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},} +Jun 6 13:23:31.379: INFO: All old ReplicaSets of Deployment "test-rollover-deployment": +Jun 6 13:23:31.379: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-controller,GenerateName:,Namespace:deployment-8064,SelfLink:/apis/apps/v1/namespaces/deployment-8064/replicasets/test-rollover-controller,UID:39623c5d-885e-11e9-bdc9-0231d0af67bc,ResourceVersion:12535,Generation:2,CreationTimestamp:2019-06-06 13:23:08 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 3d905ccd-885e-11e9-bdc9-0231d0af67bc 0xc0027694d7 
0xc0027694d8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +Jun 6 13:23:31.379: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-6455657675,GenerateName:,Namespace:deployment-8064,SelfLink:/apis/apps/v1/namespaces/deployment-8064/replicasets/test-rollover-deployment-6455657675,UID:3d9854b8-885e-11e9-bdc9-0231d0af67bc,ResourceVersion:12490,Generation:2,CreationTimestamp:2019-06-06 13:23:15 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6455657675,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 3d905ccd-885e-11e9-bdc9-0231d0af67bc 0xc0027695a7 0xc0027695a8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 6455657675,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6455657675,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +Jun 6 13:23:31.381: INFO: Pod "test-rollover-deployment-766b4d6c9d-n6kmg" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-766b4d6c9d-n6kmg,GenerateName:test-rollover-deployment-766b4d6c9d-,Namespace:deployment-8064,SelfLink:/api/v1/namespaces/deployment-8064/pods/test-rollover-deployment-766b4d6c9d-n6kmg,UID:3ecfe47b-885e-11e9-bdc9-0231d0af67bc,ResourceVersion:12510,Generation:0,CreationTimestamp:2019-06-06 13:23:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 766b4d6c9d,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.2.68/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet test-rollover-deployment-766b4d6c9d 3ecd5ce7-885e-11e9-bdc9-0231d0af67bc 0xc0023c21b7 0xc0023c21b8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-n22ks {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-n22ks,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [{default-token-n22ks true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0023c2220} {node.kubernetes.io/unreachable Exists NoExecute 0xc0023c2240}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:23:17 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:23:20 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:23:20 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:23:17 +0000 UTC 
}],Message:,Reason:,HostIP:172.16.66.200,PodIP:100.96.2.68,StartTime:2019-06-06 13:23:17 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-06-06 13:23:19 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://ac9a0eb7fd59ebc545c306d15a5e55c2993a599d2da7cd0a118e8dc6aaa6e133}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:23:31.382: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-8064" for this suite. +Jun 6 13:23:37.392: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:23:37.472: INFO: namespace deployment-8064 deletion completed in 6.087913107s + +• [SLOW TEST:29.392 seconds] +[sig-apps] Deployment +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + deployment should support rollover [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Secrets + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-api-machinery] Secrets + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:23:37.473: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in secrets-7227 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating secret secrets-7227/secret-test-4adfaa00-885e-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test consume secrets +Jun 6 13:23:37.607: INFO: Waiting up to 5m0s for pod "pod-configmaps-4ae01675-885e-11e9-b613-8a9bc7c14a19" in namespace "secrets-7227" to be "success or failure" +Jun 6 13:23:37.610: INFO: Pod "pod-configmaps-4ae01675-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.32311ms +Jun 6 13:23:39.613: INFO: Pod "pod-configmaps-4ae01675-885e-11e9-b613-8a9bc7c14a19": Phase="Running", Reason="", readiness=true. Elapsed: 2.006016077s +Jun 6 13:23:41.615: INFO: Pod "pod-configmaps-4ae01675-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.008677786s +STEP: Saw pod success +Jun 6 13:23:41.615: INFO: Pod "pod-configmaps-4ae01675-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:23:41.617: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-configmaps-4ae01675-885e-11e9-b613-8a9bc7c14a19 container env-test: +STEP: delete the pod +Jun 6 13:23:41.631: INFO: Waiting for pod pod-configmaps-4ae01675-885e-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:23:41.633: INFO: Pod pod-configmaps-4ae01675-885e-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-api-machinery] Secrets + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:23:41.633: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-7227" for this suite. +Jun 6 13:23:47.649: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:23:47.720: INFO: namespace secrets-7227 deletion completed in 6.084396467s + +• [SLOW TEST:10.247 seconds] +[sig-api-machinery] Secrets +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:32 + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +[k8s.io] InitContainer [NodeConformance] + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:23:47.720: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename init-container +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in init-container-1244 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43 +[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating the pod +Jun 6 13:23:47.894: INFO: PodSpec: initContainers in spec.initContainers +Jun 6 13:24:28.818: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-5102b754-885e-11e9-b613-8a9bc7c14a19", GenerateName:"", Namespace:"init-container-1244", SelfLink:"/api/v1/namespaces/init-container-1244/pods/pod-init-5102b754-885e-11e9-b613-8a9bc7c14a19", UID:"5103160b-885e-11e9-bdc9-0231d0af67bc", ResourceVersion:"12724", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63695424227, loc:(*time.Location)(0x8a140e0)}}, DeletionTimestamp:(*v1.Time)(nil), 
DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"894669855"}, Annotations:map[string]string{"cni.projectcalico.org/podIP":"100.96.2.70/32", "kubernetes.io/psp":"e2e-test-privileged-psp"}, OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-qfd6f", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc001c4c180), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-qfd6f", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-qfd6f", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, 
Containers:[]v1.Container{v1.Container{Name:"run1", Image:"k8s.gcr.io/pause:3.1", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-qfd6f", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc002efa2a8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"ip-172-16-66-200.ec2.internal", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc0012b80c0), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc002efa320)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc002efa340)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc002efa348), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc002efa34c)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424227, loc:(*time.Location)(0x8a140e0)}}, Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424227, loc:(*time.Location)(0x8a140e0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, 
ext:63695424227, loc:(*time.Location)(0x8a140e0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424227, loc:(*time.Location)(0x8a140e0)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"172.16.66.200", PodIP:"100.96.2.70", StartTime:(*v1.Time)(0xc00290c160), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0025081c0)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:3, Image:"busybox:1.29", ImageID:"docker-pullable://busybox@sha256:8ccbac733d19c0dd4d70b4f0c1e12245b5fa3ad24758a11035ee505c629c0796", ContainerID:"docker://f7726ea8ab9d91dbe41c978a1ea92fa0437513b9ba082bce4f79997abb2d5b60"}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc00290c1a0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"docker.io/library/busybox:1.29", ImageID:"", ContainerID:""}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc00290c180), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"k8s.gcr.io/pause:3.1", ImageID:"", ContainerID:""}}, QOSClass:"Guaranteed"}} +[AfterEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:24:28.818: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-1244" for this suite. 
+Jun 6 13:24:50.829: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:24:50.963: INFO: namespace init-container-1244 deletion completed in 22.14242764s + +• [SLOW TEST:63.243 seconds] +[k8s.io] InitContainer [NodeConformance] +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:24:50.963: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-2782 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name configmap-test-volume-map-76b62115-885e-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test consume configMaps +Jun 6 13:24:51.154: INFO: Waiting up to 5m0s for pod "pod-configmaps-76b67c4e-885e-11e9-b613-8a9bc7c14a19" in namespace "configmap-2782" to be "success or failure" +Jun 6 13:24:51.158: INFO: Pod "pod-configmaps-76b67c4e-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.954839ms +Jun 6 13:24:53.221: INFO: Pod "pod-configmaps-76b67c4e-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.06688655s +Jun 6 13:24:55.223: INFO: Pod "pod-configmaps-76b67c4e-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.06925701s +STEP: Saw pod success +Jun 6 13:24:55.223: INFO: Pod "pod-configmaps-76b67c4e-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:24:55.225: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-configmaps-76b67c4e-885e-11e9-b613-8a9bc7c14a19 container configmap-volume-test: +STEP: delete the pod +Jun 6 13:24:55.240: INFO: Waiting for pod pod-configmaps-76b67c4e-885e-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:24:55.242: INFO: Pod pod-configmaps-76b67c4e-885e-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:24:55.242: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-2782" for this suite. 
+Jun 6 13:25:01.250: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:25:01.352: INFO: namespace configmap-2782 deletion completed in 6.108722748s + +• [SLOW TEST:10.389 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl patch + should add annotations for pods in rc [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:25:01.353: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-8978 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[It] should add annotations for pods in rc [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating Redis RC +Jun 6 13:25:01.479: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-8978' +Jun 6 13:25:01.626: INFO: stderr: "" +Jun 6 13:25:01.626: INFO: stdout: "replicationcontroller/redis-master created\n" +STEP: Waiting for Redis master to start. +Jun 6 13:25:02.629: INFO: Selector matched 1 pods for map[app:redis] +Jun 6 13:25:02.629: INFO: Found 0 / 1 +Jun 6 13:25:03.628: INFO: Selector matched 1 pods for map[app:redis] +Jun 6 13:25:03.628: INFO: Found 1 / 1 +Jun 6 13:25:03.628: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +STEP: patching all pods +Jun 6 13:25:03.630: INFO: Selector matched 1 pods for map[app:redis] +Jun 6 13:25:03.630: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +Jun 6 13:25:03.630: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 patch pod redis-master-kcnft --namespace=kubectl-8978 -p {"metadata":{"annotations":{"x":"y"}}}' +Jun 6 13:25:03.698: INFO: stderr: "" +Jun 6 13:25:03.698: INFO: stdout: "pod/redis-master-kcnft patched\n" +STEP: checking annotations +Jun 6 13:25:03.700: INFO: Selector matched 1 pods for map[app:redis] +Jun 6 13:25:03.700: INFO: ForEach: Found 1 pods from the filter. Now looping through them. 
+[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:25:03.700: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-8978" for this suite. +Jun 6 13:25:25.709: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:25:25.791: INFO: namespace kubectl-8978 deletion completed in 22.087948833s + +• [SLOW TEST:24.437 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl patch + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should add annotations for pods in rc [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:25:25.791: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-3675 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name projected-configmap-test-volume-map-8b770eb3-885e-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test consume configMaps +Jun 6 13:25:25.974: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-8b777579-885e-11e9-b613-8a9bc7c14a19" in namespace "projected-3675" to be "success or failure" +Jun 6 13:25:25.977: INFO: Pod "pod-projected-configmaps-8b777579-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.146302ms +Jun 6 13:25:27.979: INFO: Pod "pod-projected-configmaps-8b777579-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.005567044s +STEP: Saw pod success +Jun 6 13:25:27.979: INFO: Pod "pod-projected-configmaps-8b777579-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:25:27.981: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-configmaps-8b777579-885e-11e9-b613-8a9bc7c14a19 container projected-configmap-volume-test: +STEP: delete the pod +Jun 6 13:25:27.994: INFO: Waiting for pod pod-projected-configmaps-8b777579-885e-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:25:27.996: INFO: Pod pod-projected-configmaps-8b777579-885e-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:25:27.996: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-3675" for this suite. +Jun 6 13:25:34.007: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:25:34.074: INFO: namespace projected-3675 deletion completed in 6.075602627s + +• [SLOW TEST:8.284 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33 + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSS +------------------------------ +[sig-storage] Projected combined + should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected combined + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:25:34.075: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-6282 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name configmap-projected-all-test-volume-905f86ba-885e-11e9-b613-8a9bc7c14a19 +STEP: Creating secret with name secret-projected-all-test-volume-905f869d-885e-11e9-b613-8a9bc7c14a19 +STEP: Creating a pod to test Check all projections for projected volume plugin +Jun 6 13:25:34.209: INFO: Waiting up to 5m0s for pod "projected-volume-905f866d-885e-11e9-b613-8a9bc7c14a19" in namespace "projected-6282" to be "success or failure" +Jun 6 13:25:34.211: INFO: Pod "projected-volume-905f866d-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1.900378ms +Jun 6 13:25:36.230: INFO: Pod "projected-volume-905f866d-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.020795993s +STEP: Saw pod success +Jun 6 13:25:36.253: INFO: Pod "projected-volume-905f866d-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:25:36.256: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod projected-volume-905f866d-885e-11e9-b613-8a9bc7c14a19 container projected-all-volume-test: +STEP: delete the pod +Jun 6 13:25:36.274: INFO: Waiting for pod projected-volume-905f866d-885e-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:25:36.320: INFO: Pod projected-volume-905f866d-885e-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Projected combined + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:25:36.320: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-6282" for this suite. +Jun 6 13:25:42.329: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:25:42.452: INFO: namespace projected-6282 deletion completed in 6.130103527s + +• [SLOW TEST:8.377 seconds] +[sig-storage] Projected combined +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_combined.go:31 + should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Variable Expansion + should allow substituting values in a container's command [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Variable Expansion + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:25:42.453: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename var-expansion +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in var-expansion-5093 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow substituting values in a container's command [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test substitution in container's command +Jun 6 13:25:42.584: INFO: Waiting up to 5m0s for pod "var-expansion-955e06c4-885e-11e9-b613-8a9bc7c14a19" in namespace "var-expansion-5093" to be "success or failure" +Jun 6 13:25:42.588: INFO: Pod "var-expansion-955e06c4-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.320167ms +Jun 6 13:25:44.591: INFO: Pod "var-expansion-955e06c4-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.007172245s +STEP: Saw pod success +Jun 6 13:25:44.591: INFO: Pod "var-expansion-955e06c4-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:25:44.593: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod var-expansion-955e06c4-885e-11e9-b613-8a9bc7c14a19 container dapi-container: +STEP: delete the pod +Jun 6 13:25:44.610: INFO: Waiting for pod var-expansion-955e06c4-885e-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:25:44.612: INFO: Pod var-expansion-955e06c4-885e-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [k8s.io] Variable Expansion + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:25:44.612: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-5093" for this suite. +Jun 6 13:25:50.621: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:25:50.688: INFO: namespace var-expansion-5093 deletion completed in 6.073419543s + +• [SLOW TEST:8.235 seconds] +[k8s.io] Variable Expansion +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should allow substituting values in a container's command [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:25:50.689: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-0 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test downward API volume plugin +Jun 6 13:25:50.822: INFO: Waiting up to 5m0s for pod "downwardapi-volume-9a46a026-885e-11e9-b613-8a9bc7c14a19" in namespace "projected-0" to be "success or failure" +Jun 6 13:25:50.831: INFO: Pod "downwardapi-volume-9a46a026-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 6.212872ms +Jun 6 13:25:52.833: INFO: Pod "downwardapi-volume-9a46a026-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.008916966s +STEP: Saw pod success +Jun 6 13:25:52.833: INFO: Pod "downwardapi-volume-9a46a026-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:25:52.919: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-9a46a026-885e-11e9-b613-8a9bc7c14a19 container client-container: +STEP: delete the pod +Jun 6 13:25:52.932: INFO: Waiting for pod downwardapi-volume-9a46a026-885e-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:25:52.934: INFO: Pod downwardapi-volume-9a46a026-885e-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:25:52.934: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-0" for this suite. +Jun 6 13:25:58.945: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:25:59.043: INFO: namespace projected-0 deletion completed in 6.10473844s + +• [SLOW TEST:8.354 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +[sig-storage] Projected downwardAPI + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:25:59.043: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-9168 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test downward API volume plugin +Jun 6 13:25:59.180: INFO: Waiting up to 5m0s for pod "downwardapi-volume-9f423b67-885e-11e9-b613-8a9bc7c14a19" in namespace "projected-9168" to be "success or failure" +Jun 6 13:25:59.183: INFO: Pod "downwardapi-volume-9f423b67-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.056656ms +Jun 6 13:26:01.187: INFO: Pod "downwardapi-volume-9f423b67-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.007045427s +Jun 6 13:26:03.203: INFO: Pod "downwardapi-volume-9f423b67-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022336446s +STEP: Saw pod success +Jun 6 13:26:03.203: INFO: Pod "downwardapi-volume-9f423b67-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:26:03.204: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-9f423b67-885e-11e9-b613-8a9bc7c14a19 container client-container: +STEP: delete the pod +Jun 6 13:26:03.229: INFO: Waiting for pod downwardapi-volume-9f423b67-885e-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:26:03.234: INFO: Pod downwardapi-volume-9f423b67-885e-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:26:03.234: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-9168" for this suite. +Jun 6 13:26:09.244: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:26:09.350: INFO: namespace projected-9168 deletion completed in 6.112951856s + +• [SLOW TEST:10.308 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:26:09.350: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-9477 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test emptydir 0644 on tmpfs +Jun 6 13:26:09.533: INFO: Waiting up to 5m0s for pod "pod-a56dfc79-885e-11e9-b613-8a9bc7c14a19" in namespace "emptydir-9477" to be "success or failure" +Jun 6 13:26:09.538: INFO: Pod "pod-a56dfc79-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.812908ms +Jun 6 13:26:11.540: INFO: Pod "pod-a56dfc79-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.007380208s +STEP: Saw pod success +Jun 6 13:26:11.540: INFO: Pod "pod-a56dfc79-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:26:11.542: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-a56dfc79-885e-11e9-b613-8a9bc7c14a19 container test-container: +STEP: delete the pod +Jun 6 13:26:11.555: INFO: Waiting for pod pod-a56dfc79-885e-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:26:11.556: INFO: Pod pod-a56dfc79-885e-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:26:11.556: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-9477" for this suite. +Jun 6 13:26:17.566: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:26:17.640: INFO: namespace emptydir-9477 deletion completed in 6.082256756s + +• [SLOW TEST:8.290 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41 + should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Proxy version v1 + should proxy logs on node using proxy subresource [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] version v1 + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:26:17.641: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename proxy +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in proxy-6070 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should proxy logs on node using proxy subresource [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +Jun 6 13:26:17.774: INFO: (0) /api/v1/nodes/ip-172-16-66-200.ec2.internal/proxy/logs/:
+amazon/
+apt/
+auth.log
+[... same listing repeated for the remaining 19 proxied /logs/ requests ...]
+[... proxy test teardown and suite summary truncated in source ...]
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+>>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-483
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0644 on tmpfs
+Jun  6 13:26:24.097: INFO: Waiting up to 5m0s for pod "pod-ae1c6b2c-885e-11e9-b613-8a9bc7c14a19" in namespace "emptydir-483" to be "success or failure"
+Jun  6 13:26:24.100: INFO: Pod "pod-ae1c6b2c-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.026946ms
+Jun  6 13:26:26.102: INFO: Pod "pod-ae1c6b2c-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.00552633s
+STEP: Saw pod success
+Jun  6 13:26:26.102: INFO: Pod "pod-ae1c6b2c-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:26:26.104: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-ae1c6b2c-885e-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 13:26:26.120: INFO: Waiting for pod pod-ae1c6b2c-885e-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:26:26.122: INFO: Pod pod-ae1c6b2c-885e-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:26:26.122: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-483" for this suite.
+Jun  6 13:26:32.132: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:26:32.205: INFO: namespace emptydir-483 deletion completed in 6.080440408s
+
+• [SLOW TEST:8.243 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl rolling-update 
+  should support rolling-update to same image  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:26:32.205: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-8986
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[BeforeEach] [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1414
+[It] should support rolling-update to same image  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun  6 13:26:32.329: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=kubectl-8986'
+Jun  6 13:26:32.394: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+Jun  6 13:26:32.394: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n"
+STEP: verifying the rc e2e-test-nginx-rc was created
+Jun  6 13:26:32.397: INFO: Waiting for rc e2e-test-nginx-rc to stabilize, generation 1 observed generation 0 spec.replicas 1 status.replicas 0
+Jun  6 13:26:32.430: INFO: Waiting for rc e2e-test-nginx-rc to stabilize, generation 1 observed generation 1 spec.replicas 1 status.replicas 0
+STEP: rolling-update to same image controller
+Jun  6 13:26:32.433: INFO: scanned /root for discovery docs: 
+Jun  6 13:26:32.433: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 rolling-update e2e-test-nginx-rc --update-period=1s --image=docker.io/library/nginx:1.14-alpine --image-pull-policy=IfNotPresent --namespace=kubectl-8986'
+Jun  6 13:26:46.374: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n"
+Jun  6 13:26:46.374: INFO: stdout: "Created e2e-test-nginx-rc-9d994d24ec4b4e5441545839106eb390\nScaling up e2e-test-nginx-rc-9d994d24ec4b4e5441545839106eb390 from 0 to 1, scaling down e2e-test-nginx-rc from 1 to 0 (keep 1 pods available, don't exceed 2 pods)\nScaling e2e-test-nginx-rc-9d994d24ec4b4e5441545839106eb390 up to 1\nScaling e2e-test-nginx-rc down to 0\nUpdate succeeded. Deleting old controller: e2e-test-nginx-rc\nRenaming e2e-test-nginx-rc-9d994d24ec4b4e5441545839106eb390 to e2e-test-nginx-rc\nreplicationcontroller/e2e-test-nginx-rc rolling updated\n"
+STEP: waiting for all containers in run=e2e-test-nginx-rc pods to come up.
+Jun  6 13:26:46.374: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l run=e2e-test-nginx-rc --namespace=kubectl-8986'
+Jun  6 13:26:46.437: INFO: stderr: ""
+Jun  6 13:26:46.437: INFO: stdout: "e2e-test-nginx-rc-9d994d24ec4b4e5441545839106eb390-c6ptd "
+Jun  6 13:26:46.437: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods e2e-test-nginx-rc-9d994d24ec4b4e5441545839106eb390-c6ptd -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "e2e-test-nginx-rc") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-8986'
+Jun  6 13:26:46.501: INFO: stderr: ""
+Jun  6 13:26:46.501: INFO: stdout: "true"
+Jun  6 13:26:46.501: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods e2e-test-nginx-rc-9d994d24ec4b4e5441545839106eb390-c6ptd -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "e2e-test-nginx-rc"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-8986'
+Jun  6 13:26:46.561: INFO: stderr: ""
+Jun  6 13:26:46.561: INFO: stdout: "docker.io/library/nginx:1.14-alpine"
+Jun  6 13:26:46.562: INFO: e2e-test-nginx-rc-9d994d24ec4b4e5441545839106eb390-c6ptd is verified up and running
+[AfterEach] [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1420
+Jun  6 13:26:46.562: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete rc e2e-test-nginx-rc --namespace=kubectl-8986'
+Jun  6 13:26:46.630: INFO: stderr: ""
+Jun  6 13:26:46.630: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:26:46.630: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-8986" for this suite.
+Jun  6 13:26:52.646: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:26:52.768: INFO: namespace kubectl-8986 deletion completed in 6.133292941s
+
+• [SLOW TEST:20.563 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should support rolling-update to same image  [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates that NodeSelector is respected if matching  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:26:52.768: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename sched-pred
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in sched-pred-1894
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79
+Jun  6 13:26:52.903: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+Jun  6 13:26:52.908: INFO: Waiting for terminating namespaces to be deleted...
+Jun  6 13:26:52.909: INFO: 
+Logging pods the kubelet thinks is on node ip-172-16-66-200.ec2.internal before test
+Jun  6 13:26:52.916: INFO: canal-kgff5 from kube-system started at 2019-06-06 12:02:46 +0000 UTC (3 container statuses recorded)
+Jun  6 13:26:52.916: INFO: 	Container calico-node ready: true, restart count 0
+Jun  6 13:26:52.916: INFO: 	Container kube-flannel ready: true, restart count 0
+Jun  6 13:26:52.916: INFO: 	Container update-network-condition ready: true, restart count 0
+Jun  6 13:26:52.916: INFO: kublr-monitoring-kube-state-metrics-6fb9c7594b-zqb9d from kube-system started at 2019-06-06 12:03:51 +0000 UTC (2 container statuses recorded)
+Jun  6 13:26:52.916: INFO: 	Container addon-resizer ready: true, restart count 0
+Jun  6 13:26:52.916: INFO: 	Container kube-state-metrics ready: true, restart count 0
+Jun  6 13:26:52.916: INFO: k8s-api-haproxy-313ee916843387945fe68a625784d2a07122c117ee63e285821800170e69f652-ip-172-16-66-200.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 13:26:52.916: INFO: kube-proxy-7a09f3d398339426fb2660a3d58c4b6a781901227d4954ccce4069e834b95d61-ip-172-16-66-200.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 13:26:52.916: INFO: heapster-v1.6.0-beta.1-6979f49998-zrlsp from kube-system started at 2019-06-06 12:03:25 +0000 UTC (2 container statuses recorded)
+Jun  6 13:26:52.916: INFO: 	Container heapster ready: true, restart count 0
+Jun  6 13:26:52.916: INFO: 	Container heapster-nanny ready: true, restart count 0
+Jun  6 13:26:52.916: INFO: node-local-dns-vq5mj from kube-system started at 2019-06-06 12:02:59 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.917: INFO: 	Container node-cache ready: true, restart count 0
+Jun  6 13:26:52.917: INFO: kublr-logging-rabbitmq-0 from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.917: INFO: 	Container rabbitmq ready: true, restart count 0
+Jun  6 13:26:52.917: INFO: sonobuoy-systemd-logs-daemon-set-bc8f4f63e26f462d-d4vcf from heptio-sonobuoy started at 2019-06-06 12:54:36 +0000 UTC (2 container statuses recorded)
+Jun  6 13:26:52.917: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun  6 13:26:52.917: INFO: 	Container systemd-logs ready: true, restart count 0
+Jun  6 13:26:52.917: INFO: metrics-server-v0.3.1-7f597fc6fd-ljsdj from kube-system started at 2019-06-06 12:03:18 +0000 UTC (2 container statuses recorded)
+Jun  6 13:26:52.917: INFO: 	Container metrics-server ready: true, restart count 0
+Jun  6 13:26:52.917: INFO: 	Container metrics-server-nanny ready: true, restart count 0
+Jun  6 13:26:52.917: INFO: kublr-logging-fluentd-es-v2.0.2-pl5tm from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.917: INFO: 	Container fluentd-es ready: true, restart count 0
+Jun  6 13:26:52.917: INFO: kublr-logging-rabbitmq-exporter-85b669fcb9-dv2t2 from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.917: INFO: 	Container kublr-logging-rabbitmq-exporter ready: true, restart count 0
+Jun  6 13:26:52.917: INFO: 
+Logging pods the kubelet thinks is on node ip-172-16-89-18.ec2.internal before test
+Jun  6 13:26:52.926: INFO: k8s-api-haproxy-313ee916843387945fe68a625784d2a07122c117ee63e285821800170e69f652-ip-172-16-89-18.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 13:26:52.926: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-06 12:54:33 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.926: INFO: 	Container kube-sonobuoy ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: kublr-monitoring-prometheus-fbf8fff5b-l4hmv from kube-system started at 2019-06-06 12:03:34 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.926: INFO: 	Container prometheus ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: canal-pszff from kube-system started at 2019-06-06 12:02:46 +0000 UTC (3 container statuses recorded)
+Jun  6 13:26:52.926: INFO: 	Container calico-node ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: 	Container kube-flannel ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: 	Container update-network-condition ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: kubernetes-dashboard-57c67b4666-9j6pn from kube-system started at 2019-06-06 12:02:57 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.926: INFO: 	Container kubernetes-dashboard ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: coredns-fb8b8dccf-w9d7l from kube-system started at 2019-06-06 12:03:06 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.926: INFO: 	Container coredns ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: kublr-system-shell-84d985ff44-nwqdk from kube-system started at 2019-06-06 12:03:29 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.926: INFO: 	Container shell ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: sonobuoy-systemd-logs-daemon-set-bc8f4f63e26f462d-k864b from heptio-sonobuoy started at 2019-06-06 12:54:36 +0000 UTC (2 container statuses recorded)
+Jun  6 13:26:52.926: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: 	Container systemd-logs ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: node-local-dns-75dpv from kube-system started at 2019-06-06 12:02:57 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.926: INFO: 	Container node-cache ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: tiller-deploy-89688d99f-c4mp7 from kube-system started at 2019-06-06 12:02:58 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.926: INFO: 	Container tiller ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: kublr-logging-fluentd-es-v2.0.2-lxzrw from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.926: INFO: 	Container fluentd-es ready: true, restart count 0
+Jun  6 13:26:52.926: INFO: kube-proxy-7a09f3d398339426fb2660a3d58c4b6a781901227d4954ccce4069e834b95d61-ip-172-16-89-18.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 13:26:52.926: INFO: kube-dns-autoscaler-5d6dc48cb8-hnkfq from kube-system started at 2019-06-06 12:02:57 +0000 UTC (1 container statuses recorded)
+Jun  6 13:26:52.926: INFO: 	Container autoscaler ready: true, restart count 0
+[It] validates that NodeSelector is respected if matching  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Trying to launch a pod without a label to get a node which can launch it.
+STEP: Explicitly delete pod here to free the resource it takes.
+STEP: Trying to apply a random label on the found node.
+STEP: verifying the node has the label kubernetes.io/e2e-c080e06f-885e-11e9-b613-8a9bc7c14a19 42
+STEP: Trying to relaunch the pod, now with labels.
+STEP: removing the label kubernetes.io/e2e-c080e06f-885e-11e9-b613-8a9bc7c14a19 off the node ip-172-16-66-200.ec2.internal
+STEP: verifying the node doesn't have the label kubernetes.io/e2e-c080e06f-885e-11e9-b613-8a9bc7c14a19
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:26:56.980: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "sched-pred-1894" for this suite.
+Jun  6 13:27:04.989: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:27:05.057: INFO: namespace sched-pred-1894 deletion completed in 8.074922529s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70
+
+• [SLOW TEST:12.289 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22
+  validates that NodeSelector is respected if matching  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:27:05.058: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-6154
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0777 on tmpfs
+Jun  6 13:27:05.190: INFO: Waiting up to 5m0s for pod "pod-c69aba83-885e-11e9-b613-8a9bc7c14a19" in namespace "emptydir-6154" to be "success or failure"
+Jun  6 13:27:05.193: INFO: Pod "pod-c69aba83-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.143894ms
+Jun  6 13:27:07.196: INFO: Pod "pod-c69aba83-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006167359s
+STEP: Saw pod success
+Jun  6 13:27:07.196: INFO: Pod "pod-c69aba83-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:27:07.198: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-c69aba83-885e-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 13:27:07.214: INFO: Waiting for pod pod-c69aba83-885e-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:27:07.215: INFO: Pod pod-c69aba83-885e-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:27:07.215: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-6154" for this suite.
+Jun  6 13:27:13.224: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:27:13.290: INFO: namespace emptydir-6154 deletion completed in 6.073073933s
+
+• [SLOW TEST:8.233 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class 
+  should be submitted and removed  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] [sig-node] Pods Extended
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:27:13.291: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pods-3559
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods Set QOS Class
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pods.go:177
+[It] should be submitted and removed  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying QOS class is set on the pod
+[AfterEach] [k8s.io] [sig-node] Pods Extended
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:27:13.425: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-3559" for this suite.
+Jun  6 13:27:35.441: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:27:35.555: INFO: namespace pods-3559 deletion completed in 22.125717831s
+
+• [SLOW TEST:22.264 seconds]
+[k8s.io] [sig-node] Pods Extended
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  [k8s.io] Pods Set QOS Class
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should be submitted and removed  [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[k8s.io] Variable Expansion 
+  should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:27:35.555: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename var-expansion
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in var-expansion-3710
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test env composition
+Jun  6 13:27:35.689: INFO: Waiting up to 5m0s for pod "var-expansion-d8c817ba-885e-11e9-b613-8a9bc7c14a19" in namespace "var-expansion-3710" to be "success or failure"
+Jun  6 13:27:35.692: INFO: Pod "var-expansion-d8c817ba-885e-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.259248ms
+Jun  6 13:27:37.694: INFO: Pod "var-expansion-d8c817ba-885e-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.004941343s
+STEP: Saw pod success
+Jun  6 13:27:37.695: INFO: Pod "var-expansion-d8c817ba-885e-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:27:37.696: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod var-expansion-d8c817ba-885e-11e9-b613-8a9bc7c14a19 container dapi-container: 
+STEP: delete the pod
+Jun  6 13:27:37.709: INFO: Waiting for pod var-expansion-d8c817ba-885e-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:27:37.710: INFO: Pod var-expansion-d8c817ba-885e-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:27:37.710: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "var-expansion-3710" for this suite.
+Jun  6 13:27:43.721: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:27:43.880: INFO: namespace var-expansion-3710 deletion completed in 6.167914473s
+
+• [SLOW TEST:8.326 seconds]
+[k8s.io] Variable Expansion
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] ConfigMap 
+  should fail to create ConfigMap with empty key [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:27:43.881: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-2962
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should fail to create ConfigMap with empty key [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap that has name configmap-test-emptyKey-ddbe7fd1-885e-11e9-b613-8a9bc7c14a19
+[AfterEach] [sig-node] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:27:44.007: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-2962" for this suite.
+Jun  6 13:27:50.017: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:27:50.190: INFO: namespace configmap-2962 deletion completed in 6.180846497s
+
+• [SLOW TEST:6.309 seconds]
+[sig-node] ConfigMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:32
+  should fail to create ConfigMap with empty key [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:27:50.190: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-6141
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name cm-test-opt-del-e187c701-885e-11e9-b613-8a9bc7c14a19
+STEP: Creating configMap with name cm-test-opt-upd-e187c750-885e-11e9-b613-8a9bc7c14a19
+STEP: Creating the pod
+STEP: Deleting configmap cm-test-opt-del-e187c701-885e-11e9-b613-8a9bc7c14a19
+STEP: Updating configmap cm-test-opt-upd-e187c750-885e-11e9-b613-8a9bc7c14a19
+STEP: Creating configMap with name cm-test-opt-create-e187c76d-885e-11e9-b613-8a9bc7c14a19
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:27:54.450: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-6141" for this suite.
+Jun  6 13:28:16.460: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:28:16.526: INFO: namespace projected-6141 deletion completed in 22.074058712s
+
+• [SLOW TEST:26.336 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should retry creating failed daemon pods [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:28:16.527: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename daemonsets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in daemonsets-9344
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should retry creating failed daemon pods [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a simple DaemonSet "daemon-set"
+STEP: Check that daemon pods launch on every node of the cluster.
+Jun  6 13:28:16.672: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:28:16.679: INFO: Number of nodes with available pods: 0
+Jun  6 13:28:16.680: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod
+Jun  6 13:28:17.682: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:28:17.684: INFO: Number of nodes with available pods: 0
+Jun  6 13:28:17.684: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod
+Jun  6 13:28:18.720: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:28:18.722: INFO: Number of nodes with available pods: 1
+Jun  6 13:28:18.722: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 13:28:19.682: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:28:19.685: INFO: Number of nodes with available pods: 2
+Jun  6 13:28:19.685: INFO: Number of running nodes: 2, number of available pods: 2
+STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.
+Jun  6 13:28:19.697: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:28:19.705: INFO: Number of nodes with available pods: 1
+Jun  6 13:28:19.705: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 13:28:20.708: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:28:20.709: INFO: Number of nodes with available pods: 1
+Jun  6 13:28:20.710: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 13:28:21.709: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:28:21.710: INFO: Number of nodes with available pods: 1
+Jun  6 13:28:21.711: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 13:28:22.708: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:28:22.719: INFO: Number of nodes with available pods: 2
+Jun  6 13:28:22.719: INFO: Number of running nodes: 2, number of available pods: 2
+STEP: Wait for the failed daemon pod to be completely deleted.
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-9344, will wait for the garbage collector to delete the pods
+Jun  6 13:28:22.780: INFO: Deleting DaemonSet.extensions daemon-set took: 4.719592ms
+Jun  6 13:28:23.180: INFO: Terminating DaemonSet.extensions daemon-set pods took: 400.257602ms
+Jun  6 13:28:35.882: INFO: Number of nodes with available pods: 0
+Jun  6 13:28:35.882: INFO: Number of running nodes: 0, number of available pods: 0
+Jun  6 13:28:35.884: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-9344/daemonsets","resourceVersion":"13712"},"items":null}
+
+Jun  6 13:28:35.885: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-9344/pods","resourceVersion":"13712"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:28:35.892: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-9344" for this suite.
+Jun  6 13:28:41.902: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:28:41.972: INFO: namespace daemonsets-9344 deletion completed in 6.077902322s
+
+• [SLOW TEST:25.446 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should retry creating failed daemon pods [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-auth] ServiceAccounts 
+  should allow opting out of API token automount  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:28:41.972: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename svcaccounts
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in svcaccounts-8981
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow opting out of API token automount  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: getting the auto-created API token
+Jun  6 13:28:42.615: INFO: created pod pod-service-account-defaultsa
+Jun  6 13:28:42.616: INFO: pod pod-service-account-defaultsa service account token volume mount: true
+Jun  6 13:28:42.626: INFO: created pod pod-service-account-mountsa
+Jun  6 13:28:42.626: INFO: pod pod-service-account-mountsa service account token volume mount: true
+Jun  6 13:28:42.635: INFO: created pod pod-service-account-nomountsa
+Jun  6 13:28:42.635: INFO: pod pod-service-account-nomountsa service account token volume mount: false
+Jun  6 13:28:42.639: INFO: created pod pod-service-account-defaultsa-mountspec
+Jun  6 13:28:42.639: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true
+Jun  6 13:28:42.654: INFO: created pod pod-service-account-mountsa-mountspec
+Jun  6 13:28:42.654: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true
+Jun  6 13:28:42.663: INFO: created pod pod-service-account-nomountsa-mountspec
+Jun  6 13:28:42.663: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true
+Jun  6 13:28:42.678: INFO: created pod pod-service-account-defaultsa-nomountspec
+Jun  6 13:28:42.678: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false
+Jun  6 13:28:42.684: INFO: created pod pod-service-account-mountsa-nomountspec
+Jun  6 13:28:42.684: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false
+Jun  6 13:28:42.691: INFO: created pod pod-service-account-nomountsa-nomountspec
+Jun  6 13:28:42.691: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false
+[AfterEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:28:42.692: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "svcaccounts-8981" for this suite.
+Jun  6 13:28:48.705: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:28:48.894: INFO: namespace svcaccounts-8981 deletion completed in 6.195836519s
+
+• [SLOW TEST:6.922 seconds]
+[sig-auth] ServiceAccounts
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:22
+  should allow opting out of API token automount  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:28:48.894: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-7763
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-test-upd-048509a9-885f-11e9-b613-8a9bc7c14a19
+STEP: Creating the pod
+STEP: Updating configmap configmap-test-upd-048509a9-885f-11e9-b613-8a9bc7c14a19
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:28:53.122: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-7763" for this suite.
+Jun  6 13:29:15.131: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:29:15.199: INFO: namespace configmap-7763 deletion completed in 22.074673415s
+
+• [SLOW TEST:26.305 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod 
+  should have an terminated reason [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:29:15.199: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubelet-test-1582
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[BeforeEach] when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81
+[It] should have an terminated reason [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:29:19.334: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-1582" for this suite.
+Jun  6 13:29:25.343: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:29:25.413: INFO: namespace kubelet-test-1582 deletion completed in 6.076545503s
+
+• [SLOW TEST:10.213 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78
+    should have an terminated reason [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Deployment 
+  RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:29:25.413: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename deployment
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in deployment-1290
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 13:29:25.589: INFO: Creating deployment "test-recreate-deployment"
+Jun  6 13:29:25.598: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1
+Jun  6 13:29:25.630: INFO: deployment "test-recreate-deployment" doesn't have the required revision set
+Jun  6 13:29:27.634: INFO: Waiting deployment "test-recreate-deployment" to complete
+Jun  6 13:29:27.636: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424565, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424565, loc:(*time.Location)(0x8a140e0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424565, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695424565, loc:(*time.Location)(0x8a140e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-7d57d5ff7c\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun  6 13:29:29.639: INFO: Triggering a new rollout for deployment "test-recreate-deployment"
+Jun  6 13:29:29.722: INFO: Updating deployment test-recreate-deployment
+Jun  6 13:29:29.722: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with old pods
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+Jun  6 13:29:29.775: INFO: Deployment "test-recreate-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment,GenerateName:,Namespace:deployment-1290,SelfLink:/apis/apps/v1/namespaces/deployment-1290/deployments/test-recreate-deployment,UID:1a4af4dd-885f-11e9-bdc9-0231d0af67bc,ResourceVersion:14025,Generation:2,CreationTimestamp:2019-06-06 13:29:25 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[{Available False 2019-06-06 13:29:29 +0000 UTC 2019-06-06 13:29:29 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} {Progressing True 2019-06-06 13:29:29 +0000 UTC 2019-06-06 13:29:25 +0000 UTC ReplicaSetUpdated ReplicaSet "test-recreate-deployment-c9cbd8684" is progressing.}],ReadyReplicas:0,CollisionCount:nil,},}
+
+Jun  6 13:29:29.777: INFO: New ReplicaSet "test-recreate-deployment-c9cbd8684" of Deployment "test-recreate-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-c9cbd8684,GenerateName:,Namespace:deployment-1290,SelfLink:/apis/apps/v1/namespaces/deployment-1290/replicasets/test-recreate-deployment-c9cbd8684,UID:1cc4eed8-885f-11e9-bdc9-0231d0af67bc,ResourceVersion:14024,Generation:1,CreationTimestamp:2019-06-06 13:29:29 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: c9cbd8684,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment 1a4af4dd-885f-11e9-bdc9-0231d0af67bc 0xc002868320 0xc002868321}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: c9cbd8684,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: c9cbd8684,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun  6 13:29:29.778: INFO: All old ReplicaSets of Deployment "test-recreate-deployment":
+Jun  6 13:29:29.778: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-7d57d5ff7c,GenerateName:,Namespace:deployment-1290,SelfLink:/apis/apps/v1/namespaces/deployment-1290/replicasets/test-recreate-deployment-7d57d5ff7c,UID:1a4bad79-885f-11e9-bdc9-0231d0af67bc,ResourceVersion:14013,Generation:2,CreationTimestamp:2019-06-06 13:29:25 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 7d57d5ff7c,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment 1a4af4dd-885f-11e9-bdc9-0231d0af67bc 0xc002868257 0xc002868258}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 7d57d5ff7c,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 7d57d5ff7c,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun  6 13:29:29.780: INFO: Pod "test-recreate-deployment-c9cbd8684-njdcn" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-c9cbd8684-njdcn,GenerateName:test-recreate-deployment-c9cbd8684-,Namespace:deployment-1290,SelfLink:/api/v1/namespaces/deployment-1290/pods/test-recreate-deployment-c9cbd8684-njdcn,UID:1cc5453e-885f-11e9-bdc9-0231d0af67bc,ResourceVersion:14021,Generation:0,CreationTimestamp:2019-06-06 13:29:29 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: c9cbd8684,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet test-recreate-deployment-c9cbd8684 1cc4eed8-885f-11e9-bdc9-0231d0af67bc 0xc002868b60 0xc002868b61}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-j6qgb {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-j6qgb,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-j6qgb true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002868bc0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002868be0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:29:29 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:29:29.780: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-1290" for this suite.
+Jun  6 13:29:35.790: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:29:35.969: INFO: namespace deployment-1290 deletion completed in 6.187455589s
+
+• [SLOW TEST:10.556 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+S
+------------------------------
+[sig-network] Services 
+  should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:29:35.969: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename services
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in services-3677
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:86
+[It] should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating service multi-endpoint-test in namespace services-3677
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3677 to expose endpoints map[]
+Jun  6 13:29:36.109: INFO: successfully validated that service multi-endpoint-test in namespace services-3677 exposes endpoints map[] (5.450758ms elapsed)
+STEP: Creating pod pod1 in namespace services-3677
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3677 to expose endpoints map[pod1:[100]]
+Jun  6 13:29:39.220: INFO: successfully validated that service multi-endpoint-test in namespace services-3677 exposes endpoints map[pod1:[100]] (3.10457411s elapsed)
+STEP: Creating pod pod2 in namespace services-3677
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3677 to expose endpoints map[pod1:[100] pod2:[101]]
+Jun  6 13:29:41.244: INFO: successfully validated that service multi-endpoint-test in namespace services-3677 exposes endpoints map[pod1:[100] pod2:[101]] (2.019017308s elapsed)
+STEP: Deleting pod pod1 in namespace services-3677
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3677 to expose endpoints map[pod2:[101]]
+Jun  6 13:29:41.257: INFO: successfully validated that service multi-endpoint-test in namespace services-3677 exposes endpoints map[pod2:[101]] (8.56011ms elapsed)
+STEP: Deleting pod pod2 in namespace services-3677
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3677 to expose endpoints map[]
+Jun  6 13:29:41.266: INFO: successfully validated that service multi-endpoint-test in namespace services-3677 exposes endpoints map[] (3.070933ms elapsed)
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:29:41.276: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "services-3677" for this suite.
+Jun  6 13:29:47.286: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:29:47.353: INFO: namespace services-3677 deletion completed in 6.073573521s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:91
+
+• [SLOW TEST:11.383 seconds]
+[sig-network] Services
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-auth] ServiceAccounts 
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:29:47.353: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename svcaccounts
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in svcaccounts-679
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: getting the auto-created API token
+STEP: reading a file in the container
+Jun  6 13:29:49.994: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-679 pod-service-account-27a42a56-885f-11e9-b613-8a9bc7c14a19 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/token'
+STEP: reading a file in the container
+Jun  6 13:29:50.309: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-679 pod-service-account-27a42a56-885f-11e9-b613-8a9bc7c14a19 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
+STEP: reading a file in the container
+Jun  6 13:29:50.542: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-679 pod-service-account-27a42a56-885f-11e9-b613-8a9bc7c14a19 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/namespace'
+[AfterEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:29:50.783: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "svcaccounts-679" for this suite.
+Jun  6 13:29:56.793: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:29:56.856: INFO: namespace svcaccounts-679 deletion completed in 6.070584061s
+
+• [SLOW TEST:9.503 seconds]
+[sig-auth] ServiceAccounts
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:22
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:29:56.856: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-6223
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name projected-configmap-test-volume-2d011e62-885f-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume configMaps
+Jun  6 13:29:56.993: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-2d01986c-885f-11e9-b613-8a9bc7c14a19" in namespace "projected-6223" to be "success or failure"
+Jun  6 13:29:56.998: INFO: Pod "pod-projected-configmaps-2d01986c-885f-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.577541ms
+Jun  6 13:29:59.001: INFO: Pod "pod-projected-configmaps-2d01986c-885f-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.008348575s
+Jun  6 13:30:01.004: INFO: Pod "pod-projected-configmaps-2d01986c-885f-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.010857062s
+STEP: Saw pod success
+Jun  6 13:30:01.004: INFO: Pod "pod-projected-configmaps-2d01986c-885f-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:30:01.005: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-configmaps-2d01986c-885f-11e9-b613-8a9bc7c14a19 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun  6 13:30:01.022: INFO: Waiting for pod pod-projected-configmaps-2d01986c-885f-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:30:01.025: INFO: Pod pod-projected-configmaps-2d01986c-885f-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:30:01.025: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-6223" for this suite.
+Jun  6 13:30:07.034: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:30:07.103: INFO: namespace projected-6223 deletion completed in 6.076696873s
+
+• [SLOW TEST:10.247 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run job 
+  should create a job from an image when restart is OnFailure  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:30:07.104: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-9746
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[BeforeEach] [k8s.io] Kubectl run job
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1510
+[It] should create a job from an image when restart is OnFailure  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun  6 13:30:07.229: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 run e2e-test-nginx-job --restart=OnFailure --generator=job/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=kubectl-9746'
+Jun  6 13:30:07.304: INFO: stderr: "kubectl run --generator=job/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+Jun  6 13:30:07.304: INFO: stdout: "job.batch/e2e-test-nginx-job created\n"
+STEP: verifying the job e2e-test-nginx-job was created
+[AfterEach] [k8s.io] Kubectl run job
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1515
+Jun  6 13:30:07.308: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete jobs e2e-test-nginx-job --namespace=kubectl-9746'
+Jun  6 13:30:07.378: INFO: stderr: ""
+Jun  6 13:30:07.379: INFO: stdout: "job.batch \"e2e-test-nginx-job\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:30:07.379: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-9746" for this suite.
+Jun  6 13:30:13.393: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:30:13.457: INFO: namespace kubectl-9746 deletion completed in 6.076515107s
+
+• [SLOW TEST:6.353 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl run job
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should create a job from an image when restart is OnFailure  [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute prestop exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:30:13.458: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-lifecycle-hook-858
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute prestop exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the pod with lifecycle hook
+STEP: delete the pod with lifecycle hook
+Jun  6 13:30:19.617: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:19.619: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:21.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:21.623: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:23.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:23.622: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:25.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:25.622: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:27.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:27.622: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:29.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:29.622: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:31.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:31.622: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:33.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:33.622: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:35.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:35.622: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:37.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:37.622: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:39.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:39.622: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:41.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:41.622: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:43.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:43.622: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:45.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:45.626: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  6 13:30:47.619: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  6 13:30:47.622: INFO: Pod pod-with-prestop-exec-hook no longer exists
+STEP: check prestop hook
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:30:47.628: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-lifecycle-hook-858" for this suite.
+Jun  6 13:31:09.637: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:31:09.712: INFO: namespace container-lifecycle-hook-858 deletion completed in 22.081824348s
+
+• [SLOW TEST:56.254 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40
+    should execute prestop exec hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:31:09.712: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-8790
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward api env vars
+Jun  6 13:31:09.845: INFO: Waiting up to 5m0s for pod "downward-api-586e4ed2-885f-11e9-b613-8a9bc7c14a19" in namespace "downward-api-8790" to be "success or failure"
+Jun  6 13:31:09.850: INFO: Pod "downward-api-586e4ed2-885f-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.389565ms
+Jun  6 13:31:11.853: INFO: Pod "downward-api-586e4ed2-885f-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008052175s
+STEP: Saw pod success
+Jun  6 13:31:11.853: INFO: Pod "downward-api-586e4ed2-885f-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:31:11.855: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downward-api-586e4ed2-885f-11e9-b613-8a9bc7c14a19 container dapi-container: 
+STEP: delete the pod
+Jun  6 13:31:11.868: INFO: Waiting for pod downward-api-586e4ed2-885f-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:31:11.870: INFO: Pod downward-api-586e4ed2-885f-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:31:11.870: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-8790" for this suite.
+Jun  6 13:31:17.882: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:31:17.949: INFO: namespace downward-api-8790 deletion completed in 6.076240962s
+
+• [SLOW TEST:8.236 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Guestbook application 
+  should create and stop a working application  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:31:17.949: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-219
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should create and stop a working application  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating all guestbook components
+Jun  6 13:31:18.073: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: redis-slave
+  labels:
+    app: redis
+    role: slave
+    tier: backend
+spec:
+  ports:
+  - port: 6379
+  selector:
+    app: redis
+    role: slave
+    tier: backend
+
+Jun  6 13:31:18.073: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-219'
+Jun  6 13:31:18.230: INFO: stderr: ""
+Jun  6 13:31:18.230: INFO: stdout: "service/redis-slave created\n"
+Jun  6 13:31:18.230: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: redis-master
+  labels:
+    app: redis
+    role: master
+    tier: backend
+spec:
+  ports:
+  - port: 6379
+    targetPort: 6379
+  selector:
+    app: redis
+    role: master
+    tier: backend
+
+Jun  6 13:31:18.230: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-219'
+Jun  6 13:31:18.374: INFO: stderr: ""
+Jun  6 13:31:18.374: INFO: stdout: "service/redis-master created\n"
+Jun  6 13:31:18.374: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: frontend
+  labels:
+    app: guestbook
+    tier: frontend
+spec:
+  # if your cluster supports it, uncomment the following to automatically create
+  # an external load-balanced IP for the frontend service.
+  # type: LoadBalancer
+  ports:
+  - port: 80
+  selector:
+    app: guestbook
+    tier: frontend
+
+Jun  6 13:31:18.374: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-219'
+Jun  6 13:31:18.535: INFO: stderr: ""
+Jun  6 13:31:18.535: INFO: stdout: "service/frontend created\n"
+Jun  6 13:31:18.535: INFO: apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: frontend
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: guestbook
+      tier: frontend
+  template:
+    metadata:
+      labels:
+        app: guestbook
+        tier: frontend
+    spec:
+      containers:
+      - name: php-redis
+        image: gcr.io/google-samples/gb-frontend:v6
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access environment variables to find service host
+          # info, comment out the 'value: dns' line above, and uncomment the
+          # line below:
+          # value: env
+        ports:
+        - containerPort: 80
+
+Jun  6 13:31:18.535: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-219'
+Jun  6 13:31:18.671: INFO: stderr: ""
+Jun  6 13:31:18.671: INFO: stdout: "deployment.apps/frontend created\n"
+Jun  6 13:31:18.671: INFO: apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: redis-master
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: redis
+      role: master
+      tier: backend
+  template:
+    metadata:
+      labels:
+        app: redis
+        role: master
+        tier: backend
+    spec:
+      containers:
+      - name: master
+        image: gcr.io/kubernetes-e2e-test-images/redis:1.0
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        ports:
+        - containerPort: 6379
+
+Jun  6 13:31:18.671: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-219'
+Jun  6 13:31:18.865: INFO: stderr: ""
+Jun  6 13:31:18.865: INFO: stdout: "deployment.apps/redis-master created\n"
+Jun  6 13:31:18.865: INFO: apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: redis-slave
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: redis
+      role: slave
+      tier: backend
+  template:
+    metadata:
+      labels:
+        app: redis
+        role: slave
+        tier: backend
+    spec:
+      containers:
+      - name: slave
+        image: gcr.io/google-samples/gb-redisslave:v3
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access an environment variable to find the master
+          # service's host, comment out the 'value: dns' line above, and
+          # uncomment the line below:
+          # value: env
+        ports:
+        - containerPort: 6379
+
+Jun  6 13:31:18.865: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-219'
+Jun  6 13:31:19.035: INFO: stderr: ""
+Jun  6 13:31:19.035: INFO: stdout: "deployment.apps/redis-slave created\n"
+STEP: validating guestbook app
+Jun  6 13:31:19.035: INFO: Waiting for all frontend pods to be Running.
+Jun  6 13:31:34.086: INFO: Waiting for frontend to serve content.
+Jun  6 13:31:39.107: INFO: Failed to get response from guestbook. err: , response: 
+Fatal error: Uncaught exception 'Predis\Connection\ConnectionException' with message 'Connection timed out [tcp://redis-slave:6379]' in /usr/local/lib/php/Predis/Connection/AbstractConnection.php:155
+Stack trace:
+#0 /usr/local/lib/php/Predis/Connection/StreamConnection.php(128): Predis\Connection\AbstractConnection->onConnectionError('Connection time...', 110)
+#1 /usr/local/lib/php/Predis/Connection/StreamConnection.php(178): Predis\Connection\StreamConnection->createStreamSocket(Object(Predis\Connection\Parameters), 'tcp://redis-sla...', 4)
+#2 /usr/local/lib/php/Predis/Connection/StreamConnection.php(100): Predis\Connection\StreamConnection->tcpStreamInitializer(Object(Predis\Connection\Parameters))
+#3 /usr/local/lib/php/Predis/Connection/AbstractConnection.php(81): Predis\Connection\StreamConnection->createResource()
+#4 /usr/local/lib/php/Predis/Connection/StreamConnection.php(258): Predis\Connection\AbstractConnection->connect()
+#5 /usr/local/lib/php/Predis/Connection/AbstractConnection.php(180): Predis\Connection\Stre in /usr/local/lib/php/Predis/Connection/AbstractConnection.php on line 155
+ +Jun 6 13:31:44.120: INFO: Trying to add a new entry to the guestbook. +Jun 6 13:31:44.134: INFO: Verifying that added entry can be retrieved. +STEP: using delete to clean up resources +Jun 6 13:31:44.142: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete --grace-period=0 --force -f - --namespace=kubectl-219' +Jun 6 13:31:44.221: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 6 13:31:44.221: INFO: stdout: "service \"redis-slave\" force deleted\n" +STEP: using delete to clean up resources +Jun 6 13:31:44.222: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete --grace-period=0 --force -f - --namespace=kubectl-219' +Jun 6 13:31:44.290: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 6 13:31:44.290: INFO: stdout: "service \"redis-master\" force deleted\n" +STEP: using delete to clean up resources +Jun 6 13:31:44.291: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete --grace-period=0 --force -f - --namespace=kubectl-219' +Jun 6 13:31:44.369: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 6 13:31:44.369: INFO: stdout: "service \"frontend\" force deleted\n" +STEP: using delete to clean up resources +Jun 6 13:31:44.369: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete --grace-period=0 --force -f - --namespace=kubectl-219' +Jun 6 13:31:44.472: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 6 13:31:44.472: INFO: stdout: "deployment.apps \"frontend\" force deleted\n" +STEP: using delete to clean up resources +Jun 6 13:31:44.472: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete --grace-period=0 --force -f - --namespace=kubectl-219' +Jun 6 13:31:44.565: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 6 13:31:44.565: INFO: stdout: "deployment.apps \"redis-master\" force deleted\n" +STEP: using delete to clean up resources +Jun 6 13:31:44.565: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete --grace-period=0 --force -f - --namespace=kubectl-219' +Jun 6 13:31:44.631: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 6 13:31:44.631: INFO: stdout: "deployment.apps \"redis-slave\" force deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:31:44.631: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-219" for this suite. 
+Jun 6 13:32:26.641: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:32:26.710: INFO: namespace kubectl-219 deletion completed in 42.07670992s + +• [SLOW TEST:68.762 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Guestbook application + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should create and stop a working application [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:32:26.711: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename gc +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in gc-9286 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: create the deployment +STEP: Wait for the Deployment to create new ReplicaSet +STEP: delete the deployment +STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the rs +STEP: Gathering metrics +W0606 13:32:56.948749 14 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled. 
+Jun 6 13:32:56.948: INFO: For apiserver_request_total: +For apiserver_request_latencies_summary: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:32:56.948: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-9286" for this suite. +Jun 6 13:33:02.958: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:33:03.042: INFO: namespace gc-9286 deletion completed in 6.09190542s + +• [SLOW TEST:36.331 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +[sig-storage] Projected configMap + updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:33:03.042: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-6274 +STEP: Waiting for a default service account to be provisioned in namespace +[It] updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating projection with configMap that has name projected-configmap-test-upd-9c030d3a-885f-11e9-b613-8a9bc7c14a19 +STEP: Creating the pod +STEP: Updating configmap projected-configmap-test-upd-9c030d3a-885f-11e9-b613-8a9bc7c14a19 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:33:07.264: INFO: 
Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-6274" for this suite. +Jun 6 13:33:29.273: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:33:29.344: INFO: namespace projected-6274 deletion completed in 22.077236142s + +• [SLOW TEST:26.301 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33 + updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +[sig-cli] Kubectl client [k8s.io] Update Demo + should create and stop a replication controller [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:33:29.344: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-1866 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Update Demo + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:265 +[It] should create and stop a replication controller [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating a replication controller +Jun 6 13:33:29.512: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-1866' +Jun 6 13:33:29.823: INFO: stderr: "" +Jun 6 13:33:29.823: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Jun 6 13:33:29.823: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-1866' +Jun 6 13:33:29.898: INFO: stderr: "" +Jun 6 13:33:29.898: INFO: stdout: "update-demo-nautilus-4tkhb update-demo-nautilus-rt9gn " +Jun 6 13:33:29.898: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-4tkhb -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-1866' +Jun 6 13:33:30.073: INFO: stderr: "" +Jun 6 13:33:30.073: INFO: stdout: "" +Jun 6 13:33:30.073: INFO: update-demo-nautilus-4tkhb is created but not running +Jun 6 13:33:35.073: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-1866' +Jun 6 13:33:35.137: INFO: stderr: "" +Jun 6 13:33:35.137: INFO: stdout: "update-demo-nautilus-4tkhb update-demo-nautilus-rt9gn " +Jun 6 13:33:35.137: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-4tkhb -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-1866' +Jun 6 13:33:35.197: INFO: stderr: "" +Jun 6 13:33:35.197: INFO: stdout: "true" +Jun 6 13:33:35.197: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-4tkhb -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-1866' +Jun 6 13:33:35.264: INFO: stderr: "" +Jun 6 13:33:35.264: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 6 13:33:35.264: INFO: validating pod update-demo-nautilus-4tkhb +Jun 6 13:33:35.269: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 6 13:33:35.269: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 6 13:33:35.269: INFO: update-demo-nautilus-4tkhb is verified up and running +Jun 6 13:33:35.269: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-rt9gn -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-1866' +Jun 6 13:33:35.329: INFO: stderr: "" +Jun 6 13:33:35.329: INFO: stdout: "true" +Jun 6 13:33:35.329: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-rt9gn -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-1866' +Jun 6 13:33:35.391: INFO: stderr: "" +Jun 6 13:33:35.392: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 6 13:33:35.392: INFO: validating pod update-demo-nautilus-rt9gn +Jun 6 13:33:35.395: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 6 13:33:35.395: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 6 13:33:35.396: INFO: update-demo-nautilus-rt9gn is verified up and running +STEP: using delete to clean up resources +Jun 6 13:33:35.396: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete --grace-period=0 --force -f - --namespace=kubectl-1866' +Jun 6 13:33:35.463: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +Jun 6 13:33:35.463: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" +Jun 6 13:33:35.463: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-1866' +Jun 6 13:33:35.536: INFO: stderr: "No resources found.\n" +Jun 6 13:33:35.536: INFO: stdout: "" +Jun 6 13:33:35.536: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -l name=update-demo --namespace=kubectl-1866 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Jun 6 13:33:35.598: INFO: stderr: "" +Jun 6 13:33:35.598: INFO: stdout: "update-demo-nautilus-4tkhb\nupdate-demo-nautilus-rt9gn\n" +Jun 6 13:33:36.098: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-1866' +Jun 6 13:33:36.164: INFO: stderr: "No resources found.\n" +Jun 6 13:33:36.164: INFO: stdout: "" +Jun 6 13:33:36.164: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -l name=update-demo --namespace=kubectl-1866 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Jun 6 13:33:36.225: INFO: stderr: "" +Jun 6 13:33:36.225: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:33:36.225: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-1866" for this suite. 
+Jun 6 13:33:58.235: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:33:58.349: INFO: namespace kubectl-1866 deletion completed in 22.121608155s + +• [SLOW TEST:29.005 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Update Demo + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should create and stop a replication controller [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with downward pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:33:58.350: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename subpath +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in subpath-4054 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with downward pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating pod pod-subpath-test-downwardapi-gffk +STEP: Creating a pod to test atomic-volume-subpath +Jun 6 13:33:58.487: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-gffk" in namespace "subpath-4054" to be "success or failure" +Jun 6 13:33:58.492: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Pending", Reason="", readiness=false. Elapsed: 5.104953ms +Jun 6 13:34:00.495: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Pending", Reason="", readiness=false. Elapsed: 2.00792092s +Jun 6 13:34:02.497: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Running", Reason="", readiness=true. Elapsed: 4.010381861s +Jun 6 13:34:04.499: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Running", Reason="", readiness=true. Elapsed: 6.012743236s +Jun 6 13:34:06.502: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Running", Reason="", readiness=true. Elapsed: 8.015281196s +Jun 6 13:34:08.504: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Running", Reason="", readiness=true. Elapsed: 10.017703455s +Jun 6 13:34:10.507: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Running", Reason="", readiness=true. Elapsed: 12.020699288s +Jun 6 13:34:12.510: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Running", Reason="", readiness=true. 
Elapsed: 14.023185189s +Jun 6 13:34:14.520: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Running", Reason="", readiness=true. Elapsed: 16.032853435s +Jun 6 13:34:16.522: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Running", Reason="", readiness=true. Elapsed: 18.035700719s +Jun 6 13:34:18.525: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Running", Reason="", readiness=true. Elapsed: 20.038125823s +Jun 6 13:34:20.527: INFO: Pod "pod-subpath-test-downwardapi-gffk": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.04065887s +STEP: Saw pod success +Jun 6 13:34:20.527: INFO: Pod "pod-subpath-test-downwardapi-gffk" satisfied condition "success or failure" +Jun 6 13:34:20.529: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-subpath-test-downwardapi-gffk container test-container-subpath-downwardapi-gffk: +STEP: delete the pod +Jun 6 13:34:20.544: INFO: Waiting for pod pod-subpath-test-downwardapi-gffk to disappear +Jun 6 13:34:20.546: INFO: Pod pod-subpath-test-downwardapi-gffk no longer exists +STEP: Deleting pod pod-subpath-test-downwardapi-gffk +Jun 6 13:34:20.546: INFO: Deleting pod "pod-subpath-test-downwardapi-gffk" in namespace "subpath-4054" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:34:20.547: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-4054" for this suite. +Jun 6 13:34:26.559: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:34:26.662: INFO: namespace subpath-4054 deletion completed in 6.11173469s + +• [SLOW TEST:28.312 seconds] +[sig-storage] Subpath +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with downward pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Update Demo + should scale a replication controller [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:34:26.662: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-6035 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Update Demo + 
/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:265 +[It] should scale a replication controller [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating a replication controller +Jun 6 13:34:26.788: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-6035' +Jun 6 13:34:26.930: INFO: stderr: "" +Jun 6 13:34:26.930: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Jun 6 13:34:26.930: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-6035' +Jun 6 13:34:27.052: INFO: stderr: "" +Jun 6 13:34:27.052: INFO: stdout: "update-demo-nautilus-gzc9x update-demo-nautilus-l58pv " +Jun 6 13:34:27.052: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-gzc9x -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:27.116: INFO: stderr: "" +Jun 6 13:34:27.116: INFO: stdout: "" +Jun 6 13:34:27.116: INFO: update-demo-nautilus-gzc9x is created but not running +Jun 6 13:34:32.116: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-6035' +Jun 6 13:34:32.181: INFO: stderr: "" +Jun 6 13:34:32.181: INFO: stdout: "update-demo-nautilus-gzc9x update-demo-nautilus-l58pv " +Jun 6 13:34:32.181: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-gzc9x -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:32.243: INFO: stderr: "" +Jun 6 13:34:32.243: INFO: stdout: "true" +Jun 6 13:34:32.243: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-gzc9x -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:32.308: INFO: stderr: "" +Jun 6 13:34:32.308: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 6 13:34:32.308: INFO: validating pod update-demo-nautilus-gzc9x +Jun 6 13:34:32.311: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 6 13:34:32.311: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 6 13:34:32.311: INFO: update-demo-nautilus-gzc9x is verified up and running +Jun 6 13:34:32.311: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-l58pv -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:32.374: INFO: stderr: "" +Jun 6 13:34:32.374: INFO: stdout: "true" +Jun 6 13:34:32.374: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-l58pv -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:32.436: INFO: stderr: "" +Jun 6 13:34:32.436: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 6 13:34:32.436: INFO: validating pod update-demo-nautilus-l58pv +Jun 6 13:34:32.440: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 6 13:34:32.440: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 6 13:34:32.440: INFO: update-demo-nautilus-l58pv is verified up and running +STEP: scaling down the replication controller +Jun 6 13:34:32.442: INFO: scanned /root for discovery docs: +Jun 6 13:34:32.442: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 scale rc update-demo-nautilus --replicas=1 --timeout=5m --namespace=kubectl-6035' +Jun 6 13:34:33.536: INFO: stderr: "" +Jun 6 13:34:33.536: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Jun 6 13:34:33.536: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-6035' +Jun 6 13:34:33.600: INFO: stderr: "" +Jun 6 13:34:33.601: INFO: stdout: "update-demo-nautilus-gzc9x update-demo-nautilus-l58pv " +STEP: Replicas for name=update-demo: expected=1 actual=2 +Jun 6 13:34:38.601: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-6035' +Jun 6 13:34:38.664: INFO: stderr: "" +Jun 6 13:34:38.664: INFO: stdout: "update-demo-nautilus-l58pv " +Jun 6 13:34:38.664: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-l58pv -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:38.724: INFO: stderr: "" +Jun 6 13:34:38.724: INFO: stdout: "true" +Jun 6 13:34:38.724: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-l58pv -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:38.783: INFO: stderr: "" +Jun 6 13:34:38.783: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 6 13:34:38.783: INFO: validating pod update-demo-nautilus-l58pv +Jun 6 13:34:38.786: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 6 13:34:38.786: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . 
+Jun 6 13:34:38.786: INFO: update-demo-nautilus-l58pv is verified up and running +STEP: scaling up the replication controller +Jun 6 13:34:38.788: INFO: scanned /root for discovery docs: +Jun 6 13:34:38.788: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 scale rc update-demo-nautilus --replicas=2 --timeout=5m --namespace=kubectl-6035' +Jun 6 13:34:39.891: INFO: stderr: "" +Jun 6 13:34:39.891: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Jun 6 13:34:39.892: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-6035' +Jun 6 13:34:39.961: INFO: stderr: "" +Jun 6 13:34:39.961: INFO: stdout: "update-demo-nautilus-l58pv update-demo-nautilus-xdr7m " +Jun 6 13:34:39.961: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-l58pv -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:40.023: INFO: stderr: "" +Jun 6 13:34:40.023: INFO: stdout: "true" +Jun 6 13:34:40.023: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-l58pv -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:40.097: INFO: stderr: "" +Jun 6 13:34:40.097: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 6 13:34:40.097: INFO: validating pod update-demo-nautilus-l58pv +Jun 6 13:34:40.100: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 6 13:34:40.100: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 6 13:34:40.100: INFO: update-demo-nautilus-l58pv is verified up and running +Jun 6 13:34:40.100: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-xdr7m -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:40.163: INFO: stderr: "" +Jun 6 13:34:40.163: INFO: stdout: "" +Jun 6 13:34:40.163: INFO: update-demo-nautilus-xdr7m is created but not running +Jun 6 13:34:45.163: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-6035' +Jun 6 13:34:45.230: INFO: stderr: "" +Jun 6 13:34:45.230: INFO: stdout: "update-demo-nautilus-l58pv update-demo-nautilus-xdr7m " +Jun 6 13:34:45.230: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-l58pv -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:45.297: INFO: stderr: "" +Jun 6 13:34:45.297: INFO: stdout: "true" +Jun 6 13:34:45.297: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-l58pv -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:45.359: INFO: stderr: "" +Jun 6 13:34:45.359: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 6 13:34:45.359: INFO: validating pod update-demo-nautilus-l58pv +Jun 6 13:34:45.362: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 6 13:34:45.362: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 6 13:34:45.362: INFO: update-demo-nautilus-l58pv is verified up and running +Jun 6 13:34:45.362: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-xdr7m -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:45.425: INFO: stderr: "" +Jun 6 13:34:45.425: INFO: stdout: "true" +Jun 6 13:34:45.425: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods update-demo-nautilus-xdr7m -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-6035' +Jun 6 13:34:45.486: INFO: stderr: "" +Jun 6 13:34:45.486: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 6 13:34:45.486: INFO: validating pod update-demo-nautilus-xdr7m +Jun 6 13:34:45.491: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 6 13:34:45.491: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 6 13:34:45.491: INFO: update-demo-nautilus-xdr7m is verified up and running +STEP: using delete to clean up resources +Jun 6 13:34:45.491: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete --grace-period=0 --force -f - --namespace=kubectl-6035' +Jun 6 13:34:45.556: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +Jun 6 13:34:45.556: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" +Jun 6 13:34:45.556: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-6035' +Jun 6 13:34:45.624: INFO: stderr: "No resources found.\n" +Jun 6 13:34:45.624: INFO: stdout: "" +Jun 6 13:34:45.624: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -l name=update-demo --namespace=kubectl-6035 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Jun 6 13:34:45.705: INFO: stderr: "" +Jun 6 13:34:45.706: INFO: stdout: "update-demo-nautilus-l58pv\nupdate-demo-nautilus-xdr7m\n" +Jun 6 13:34:46.206: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-6035' +Jun 6 13:34:46.276: INFO: stderr: "No resources found.\n" +Jun 6 13:34:46.276: INFO: stdout: "" +Jun 6 13:34:46.276: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -l name=update-demo --namespace=kubectl-6035 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Jun 6 13:34:46.338: INFO: stderr: "" +Jun 6 13:34:46.338: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:34:46.338: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-6035" for this suite. 
+Jun 6 13:35:08.348: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:35:08.417: INFO: namespace kubectl-6035 deletion completed in 22.076657711s + +• [SLOW TEST:41.755 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Update Demo + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should scale a replication controller [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's memory request [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:35:08.418: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-3909 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide container's memory request [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test downward API volume plugin +Jun 6 13:35:08.551: INFO: Waiting up to 5m0s for pod "downwardapi-volume-e6b5a386-885f-11e9-b613-8a9bc7c14a19" in namespace "projected-3909" to be "success or failure" +Jun 6 13:35:08.556: INFO: Pod "downwardapi-volume-e6b5a386-885f-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.662974ms +Jun 6 13:35:10.560: INFO: Pod "downwardapi-volume-e6b5a386-885f-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.008926821s +Jun 6 13:35:12.563: INFO: Pod "downwardapi-volume-e6b5a386-885f-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.011926451s +STEP: Saw pod success +Jun 6 13:35:12.563: INFO: Pod "downwardapi-volume-e6b5a386-885f-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure" +Jun 6 13:35:12.565: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-e6b5a386-885f-11e9-b613-8a9bc7c14a19 container client-container: +STEP: delete the pod +Jun 6 13:35:12.582: INFO: Waiting for pod downwardapi-volume-e6b5a386-885f-11e9-b613-8a9bc7c14a19 to disappear +Jun 6 13:35:12.584: INFO: Pod downwardapi-volume-e6b5a386-885f-11e9-b613-8a9bc7c14a19 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 6 13:35:12.584: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-3909" for this suite. +Jun 6 13:35:18.594: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 6 13:35:18.663: INFO: namespace projected-3909 deletion completed in 6.076649677s + +• [SLOW TEST:10.245 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide container's memory request [NodeConformance] [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSS +------------------------------ +[sig-network] Proxy version v1 + should proxy logs on node with explicit kubelet port using proxy subresource [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] version v1 + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 6 13:35:18.664: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163 +STEP: Building a namespace api object, basename proxy +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in proxy-1947 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should proxy logs on node with explicit kubelet port using proxy subresource [Conformance] + /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +Jun 6 13:35:18.795: INFO: (0) /api/v1/nodes/ip-172-16-66-200.ec2.internal:10250/proxy/logs/:
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+
+amazon/
+apt/
+auth.log
+>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-6305
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward api env vars
+Jun  6 13:35:25.070: INFO: Waiting up to 5m0s for pod "downward-api-f08e7f93-885f-11e9-b613-8a9bc7c14a19" in namespace "downward-api-6305" to be "success or failure"
+Jun  6 13:35:25.074: INFO: Pod "downward-api-f08e7f93-885f-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.843222ms
+Jun  6 13:35:27.077: INFO: Pod "downward-api-f08e7f93-885f-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006439697s
+STEP: Saw pod success
+Jun  6 13:35:27.077: INFO: Pod "downward-api-f08e7f93-885f-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:35:27.078: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downward-api-f08e7f93-885f-11e9-b613-8a9bc7c14a19 container dapi-container: 
+STEP: delete the pod
+Jun  6 13:35:27.091: INFO: Waiting for pod downward-api-f08e7f93-885f-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:35:27.093: INFO: Pod downward-api-f08e7f93-885f-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:35:27.093: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-6305" for this suite.
+Jun  6 13:35:33.102: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:35:33.170: INFO: namespace downward-api-6305 deletion completed in 6.075130951s
+
+• [SLOW TEST:8.230 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:35:33.171: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-6824
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 13:35:33.306: INFO: Waiting up to 5m0s for pod "downwardapi-volume-f576efea-885f-11e9-b613-8a9bc7c14a19" in namespace "downward-api-6824" to be "success or failure"
+Jun  6 13:35:33.310: INFO: Pod "downwardapi-volume-f576efea-885f-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.150448ms
+Jun  6 13:35:35.313: INFO: Pod "downwardapi-volume-f576efea-885f-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006651666s
+STEP: Saw pod success
+Jun  6 13:35:35.313: INFO: Pod "downwardapi-volume-f576efea-885f-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:35:35.314: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-f576efea-885f-11e9-b613-8a9bc7c14a19 container client-container: 
+STEP: delete the pod
+Jun  6 13:35:35.330: INFO: Waiting for pod downwardapi-volume-f576efea-885f-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:35:35.331: INFO: Pod downwardapi-volume-f576efea-885f-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:35:35.331: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-6824" for this suite.
+Jun  6 13:35:41.341: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:35:41.406: INFO: namespace downward-api-6824 deletion completed in 6.071812857s
+
+• [SLOW TEST:8.235 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for intra-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:35:41.406: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pod-network-test-4985
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for intra-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Performing setup for networking test in namespace pod-network-test-4985
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun  6 13:35:41.531: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun  6 13:36:05.588: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://100.96.2.117:8080/dial?request=hostName&protocol=udp&host=100.96.2.116&port=8081&tries=1'] Namespace:pod-network-test-4985 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:36:05.588: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:36:05.798: INFO: Waiting for endpoints: map[]
+Jun  6 13:36:05.819: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://100.96.2.117:8080/dial?request=hostName&protocol=udp&host=100.96.1.32&port=8081&tries=1'] Namespace:pod-network-test-4985 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:36:05.819: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:36:05.990: INFO: Waiting for endpoints: map[]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:36:05.991: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-4985" for this suite.
+Jun  6 13:36:28.000: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:36:28.154: INFO: namespace pod-network-test-4985 deletion completed in 22.160335887s
+
+• [SLOW TEST:46.748 seconds]
+[sig-network] Networking
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for intra-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:36:28.155: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename sched-pred
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in sched-pred-5686
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79
+Jun  6 13:36:28.279: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+Jun  6 13:36:28.283: INFO: Waiting for terminating namespaces to be deleted...
+Jun  6 13:36:28.285: INFO: 
+Logging pods the kubelet thinks is on node ip-172-16-66-200.ec2.internal before test
+Jun  6 13:36:28.294: INFO: k8s-api-haproxy-313ee916843387945fe68a625784d2a07122c117ee63e285821800170e69f652-ip-172-16-66-200.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 13:36:28.294: INFO: kube-proxy-7a09f3d398339426fb2660a3d58c4b6a781901227d4954ccce4069e834b95d61-ip-172-16-66-200.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 13:36:28.294: INFO: heapster-v1.6.0-beta.1-6979f49998-zrlsp from kube-system started at 2019-06-06 12:03:25 +0000 UTC (2 container statuses recorded)
+Jun  6 13:36:28.294: INFO: 	Container heapster ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: 	Container heapster-nanny ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: node-local-dns-vq5mj from kube-system started at 2019-06-06 12:02:59 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.294: INFO: 	Container node-cache ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: kublr-logging-rabbitmq-0 from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.294: INFO: 	Container rabbitmq ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: sonobuoy-systemd-logs-daemon-set-bc8f4f63e26f462d-d4vcf from heptio-sonobuoy started at 2019-06-06 12:54:36 +0000 UTC (2 container statuses recorded)
+Jun  6 13:36:28.294: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: 	Container systemd-logs ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: metrics-server-v0.3.1-7f597fc6fd-ljsdj from kube-system started at 2019-06-06 12:03:18 +0000 UTC (2 container statuses recorded)
+Jun  6 13:36:28.294: INFO: 	Container metrics-server ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: 	Container metrics-server-nanny ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: kublr-logging-fluentd-es-v2.0.2-pl5tm from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.294: INFO: 	Container fluentd-es ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: kublr-logging-rabbitmq-exporter-85b669fcb9-dv2t2 from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.294: INFO: 	Container kublr-logging-rabbitmq-exporter ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: canal-kgff5 from kube-system started at 2019-06-06 12:02:46 +0000 UTC (3 container statuses recorded)
+Jun  6 13:36:28.294: INFO: 	Container calico-node ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: 	Container kube-flannel ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: 	Container update-network-condition ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: kublr-monitoring-kube-state-metrics-6fb9c7594b-zqb9d from kube-system started at 2019-06-06 12:03:51 +0000 UTC (2 container statuses recorded)
+Jun  6 13:36:28.294: INFO: 	Container addon-resizer ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: 	Container kube-state-metrics ready: true, restart count 0
+Jun  6 13:36:28.294: INFO: 
+Logging pods the kubelet thinks is on node ip-172-16-89-18.ec2.internal before test
+Jun  6 13:36:28.303: INFO: kube-proxy-7a09f3d398339426fb2660a3d58c4b6a781901227d4954ccce4069e834b95d61-ip-172-16-89-18.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 13:36:28.303: INFO: kube-dns-autoscaler-5d6dc48cb8-hnkfq from kube-system started at 2019-06-06 12:02:57 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.303: INFO: 	Container autoscaler ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: kublr-monitoring-prometheus-fbf8fff5b-l4hmv from kube-system started at 2019-06-06 12:03:34 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.303: INFO: 	Container prometheus ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: k8s-api-haproxy-313ee916843387945fe68a625784d2a07122c117ee63e285821800170e69f652-ip-172-16-89-18.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 13:36:28.303: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-06 12:54:33 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.303: INFO: 	Container kube-sonobuoy ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: kublr-system-shell-84d985ff44-nwqdk from kube-system started at 2019-06-06 12:03:29 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.303: INFO: 	Container shell ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: sonobuoy-systemd-logs-daemon-set-bc8f4f63e26f462d-k864b from heptio-sonobuoy started at 2019-06-06 12:54:36 +0000 UTC (2 container statuses recorded)
+Jun  6 13:36:28.303: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: 	Container systemd-logs ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: canal-pszff from kube-system started at 2019-06-06 12:02:46 +0000 UTC (3 container statuses recorded)
+Jun  6 13:36:28.303: INFO: 	Container calico-node ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: 	Container kube-flannel ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: 	Container update-network-condition ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: kubernetes-dashboard-57c67b4666-9j6pn from kube-system started at 2019-06-06 12:02:57 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.303: INFO: 	Container kubernetes-dashboard ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: coredns-fb8b8dccf-w9d7l from kube-system started at 2019-06-06 12:03:06 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.303: INFO: 	Container coredns ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: node-local-dns-75dpv from kube-system started at 2019-06-06 12:02:57 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.303: INFO: 	Container node-cache ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: tiller-deploy-89688d99f-c4mp7 from kube-system started at 2019-06-06 12:02:58 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.303: INFO: 	Container tiller ready: true, restart count 0
+Jun  6 13:36:28.303: INFO: kublr-logging-fluentd-es-v2.0.2-lxzrw from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 13:36:28.303: INFO: 	Container fluentd-es ready: true, restart count 0
+[It] validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: verifying the node has the label node ip-172-16-66-200.ec2.internal
+STEP: verifying the node has the label node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.335: INFO: Pod sonobuoy requesting resource cpu=0m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.335: INFO: Pod sonobuoy-systemd-logs-daemon-set-bc8f4f63e26f462d-d4vcf requesting resource cpu=0m on Node ip-172-16-66-200.ec2.internal
+Jun  6 13:36:28.335: INFO: Pod sonobuoy-systemd-logs-daemon-set-bc8f4f63e26f462d-k864b requesting resource cpu=0m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.335: INFO: Pod canal-kgff5 requesting resource cpu=40m on Node ip-172-16-66-200.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod canal-pszff requesting resource cpu=40m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod coredns-fb8b8dccf-w9d7l requesting resource cpu=100m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod heapster-v1.6.0-beta.1-6979f49998-zrlsp requesting resource cpu=138m on Node ip-172-16-66-200.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod k8s-api-haproxy-313ee916843387945fe68a625784d2a07122c117ee63e285821800170e69f652-ip-172-16-66-200.ec2.internal requesting resource cpu=1m on Node ip-172-16-66-200.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod k8s-api-haproxy-313ee916843387945fe68a625784d2a07122c117ee63e285821800170e69f652-ip-172-16-89-18.ec2.internal requesting resource cpu=1m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod kube-dns-autoscaler-5d6dc48cb8-hnkfq requesting resource cpu=1m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod kube-proxy-7a09f3d398339426fb2660a3d58c4b6a781901227d4954ccce4069e834b95d61-ip-172-16-66-200.ec2.internal requesting resource cpu=5m on Node ip-172-16-66-200.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod kube-proxy-7a09f3d398339426fb2660a3d58c4b6a781901227d4954ccce4069e834b95d61-ip-172-16-89-18.ec2.internal requesting resource cpu=5m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod kubernetes-dashboard-57c67b4666-9j6pn requesting resource cpu=5m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod kublr-logging-fluentd-es-v2.0.2-lxzrw requesting resource cpu=150m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod kublr-logging-fluentd-es-v2.0.2-pl5tm requesting resource cpu=150m on Node ip-172-16-66-200.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod kublr-logging-rabbitmq-0 requesting resource cpu=400m on Node ip-172-16-66-200.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod kublr-logging-rabbitmq-exporter-85b669fcb9-dv2t2 requesting resource cpu=10m on Node ip-172-16-66-200.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod kublr-monitoring-kube-state-metrics-6fb9c7594b-zqb9d requesting resource cpu=113m on Node ip-172-16-66-200.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod kublr-monitoring-prometheus-fbf8fff5b-l4hmv requesting resource cpu=700m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod kublr-system-shell-84d985ff44-nwqdk requesting resource cpu=0m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod metrics-server-v0.3.1-7f597fc6fd-ljsdj requesting resource cpu=98m on Node ip-172-16-66-200.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod node-local-dns-75dpv requesting resource cpu=25m on Node ip-172-16-89-18.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod node-local-dns-vq5mj requesting resource cpu=25m on Node ip-172-16-66-200.ec2.internal
+Jun  6 13:36:28.336: INFO: Pod tiller-deploy-89688d99f-c4mp7 requesting resource cpu=5m on Node ip-172-16-89-18.ec2.internal
+STEP: Starting Pods to consume most of the cluster CPU.
+STEP: Creating another pod that requires unavailable amount of CPU.
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-1644ebe1-8860-11e9-b613-8a9bc7c14a19.15a59f791504ae0f], Reason = [Scheduled], Message = [Successfully assigned sched-pred-5686/filler-pod-1644ebe1-8860-11e9-b613-8a9bc7c14a19 to ip-172-16-66-200.ec2.internal]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-1644ebe1-8860-11e9-b613-8a9bc7c14a19.15a59f794e86c292], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.1" already present on machine]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-1644ebe1-8860-11e9-b613-8a9bc7c14a19.15a59f7952330344], Reason = [Created], Message = [Created container filler-pod-1644ebe1-8860-11e9-b613-8a9bc7c14a19]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-1644ebe1-8860-11e9-b613-8a9bc7c14a19.15a59f79651c18d5], Reason = [Started], Message = [Started container filler-pod-1644ebe1-8860-11e9-b613-8a9bc7c14a19]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-16460308-8860-11e9-b613-8a9bc7c14a19.15a59f7919607d6e], Reason = [Scheduled], Message = [Successfully assigned sched-pred-5686/filler-pod-16460308-8860-11e9-b613-8a9bc7c14a19 to ip-172-16-89-18.ec2.internal]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-16460308-8860-11e9-b613-8a9bc7c14a19.15a59f7949de0d06], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.1" already present on machine]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-16460308-8860-11e9-b613-8a9bc7c14a19.15a59f794c405359], Reason = [Created], Message = [Created container filler-pod-16460308-8860-11e9-b613-8a9bc7c14a19]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-16460308-8860-11e9-b613-8a9bc7c14a19.15a59f795ae8235a], Reason = [Started], Message = [Started container filler-pod-16460308-8860-11e9-b613-8a9bc7c14a19]
+STEP: Considering event: 
+Type = [Warning], Name = [additional-pod.15a59f798d8725a0], Reason = [FailedScheduling], Message = [0/3 nodes are available: 1 node(s) had taints that the pod didn't tolerate, 2 Insufficient cpu.]
+STEP: removing the label node off the node ip-172-16-66-200.ec2.internal
+STEP: verifying the node doesn't have the label node
+STEP: removing the label node off the node ip-172-16-89-18.ec2.internal
+STEP: verifying the node doesn't have the label node
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:36:31.404: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "sched-pred-5686" for this suite.
+Jun  6 13:36:37.413: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:36:37.478: INFO: namespace sched-pred-5686 deletion completed in 6.071013399s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70
+
+• [SLOW TEST:9.323 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:36:37.478: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-738
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 13:36:37.666: INFO: Waiting up to 5m0s for pod "downwardapi-volume-1bd37598-8860-11e9-b613-8a9bc7c14a19" in namespace "projected-738" to be "success or failure"
+Jun  6 13:36:37.668: INFO: Pod "downwardapi-volume-1bd37598-8860-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.043049ms
+Jun  6 13:36:39.671: INFO: Pod "downwardapi-volume-1bd37598-8860-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.00456039s
+STEP: Saw pod success
+Jun  6 13:36:39.671: INFO: Pod "downwardapi-volume-1bd37598-8860-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:36:39.676: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-1bd37598-8860-11e9-b613-8a9bc7c14a19 container client-container: 
+STEP: delete the pod
+Jun  6 13:36:39.690: INFO: Waiting for pod downwardapi-volume-1bd37598-8860-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:36:39.692: INFO: Pod downwardapi-volume-1bd37598-8860-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:36:39.692: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-738" for this suite.
+Jun  6 13:36:45.701: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:36:45.769: INFO: namespace projected-738 deletion completed in 6.075179757s
+
+• [SLOW TEST:8.291 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicationController 
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:36:45.770: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename replication-controller
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in replication-controller-312
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating replication controller my-hostname-basic-20bc1590-8860-11e9-b613-8a9bc7c14a19
+Jun  6 13:36:45.899: INFO: Pod name my-hostname-basic-20bc1590-8860-11e9-b613-8a9bc7c14a19: Found 0 pods out of 1
+Jun  6 13:36:50.902: INFO: Pod name my-hostname-basic-20bc1590-8860-11e9-b613-8a9bc7c14a19: Found 1 pods out of 1
+Jun  6 13:36:50.902: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-20bc1590-8860-11e9-b613-8a9bc7c14a19" are running
+Jun  6 13:36:50.904: INFO: Pod "my-hostname-basic-20bc1590-8860-11e9-b613-8a9bc7c14a19-kc4sj" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-06 13:36:45 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-06 13:36:47 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-06 13:36:47 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-06 13:36:45 +0000 UTC Reason: Message:}])
+Jun  6 13:36:50.904: INFO: Trying to dial the pod
+Jun  6 13:36:55.912: INFO: Controller my-hostname-basic-20bc1590-8860-11e9-b613-8a9bc7c14a19: Got expected result from replica 1 [my-hostname-basic-20bc1590-8860-11e9-b613-8a9bc7c14a19-kc4sj]: "my-hostname-basic-20bc1590-8860-11e9-b613-8a9bc7c14a19-kc4sj", 1 of 1 required successes so far
+[AfterEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:36:55.912: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replication-controller-312" for this suite.
+Jun  6 13:37:01.921: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:37:02.070: INFO: namespace replication-controller-312 deletion completed in 6.155367189s
+
+• [SLOW TEST:16.301 seconds]
+[sig-apps] ReplicationController
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:37:02.070: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-8524
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0666 on tmpfs
+Jun  6 13:37:02.251: INFO: Waiting up to 5m0s for pod "pod-2a7b2231-8860-11e9-b613-8a9bc7c14a19" in namespace "emptydir-8524" to be "success or failure"
+Jun  6 13:37:02.255: INFO: Pod "pod-2a7b2231-8860-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.575727ms
+Jun  6 13:37:04.257: INFO: Pod "pod-2a7b2231-8860-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006147715s
+STEP: Saw pod success
+Jun  6 13:37:04.258: INFO: Pod "pod-2a7b2231-8860-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:37:04.259: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-2a7b2231-8860-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 13:37:04.273: INFO: Waiting for pod pod-2a7b2231-8860-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:37:04.275: INFO: Pod pod-2a7b2231-8860-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:37:04.275: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-8524" for this suite.
+Jun  6 13:37:10.319: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:37:10.383: INFO: namespace emptydir-8524 deletion completed in 6.106612949s
+
+• [SLOW TEST:8.313 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:37:10.385: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-5925
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating the pod
+Jun  6 13:37:13.091: INFO: Successfully updated pod "labelsupdate2f6fd6f0-8860-11e9-b613-8a9bc7c14a19"
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:37:15.107: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-5925" for this suite.
+Jun  6 13:37:37.117: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:37:37.272: INFO: namespace projected-5925 deletion completed in 22.163056385s
+
+• [SLOW TEST:26.888 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:37:37.273: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename daemonsets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in daemonsets-4410
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 13:37:37.411: INFO: Creating simple daemon set daemon-set
+STEP: Check that daemon pods launch on every node of the cluster.
+Jun  6 13:37:37.418: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:37.422: INFO: Number of nodes with available pods: 0
+Jun  6 13:37:37.422: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod
+Jun  6 13:37:38.425: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:38.427: INFO: Number of nodes with available pods: 0
+Jun  6 13:37:38.427: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod
+Jun  6 13:37:39.425: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:39.520: INFO: Number of nodes with available pods: 2
+Jun  6 13:37:39.520: INFO: Number of running nodes: 2, number of available pods: 2
+STEP: Update daemon pods image.
+STEP: Check that daemon pods images are updated.
+Jun  6 13:37:39.541: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:39.541: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:39.543: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:40.620: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:40.620: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:40.623: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:41.546: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:41.546: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:41.549: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:42.620: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:42.620: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:42.620: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:42.623: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:43.546: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:43.546: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:43.546: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:43.549: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:44.546: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:44.546: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:44.546: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:44.548: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:45.546: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:45.546: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:45.546: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:45.549: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:46.547: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:46.548: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:46.548: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:46.551: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:47.546: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:47.546: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:47.546: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:47.549: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:48.621: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:48.621: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:48.621: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:48.623: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:49.551: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:49.551: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:49.551: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:49.553: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:50.548: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:50.548: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:50.548: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:50.550: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:51.550: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:51.550: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:51.550: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:51.553: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:52.546: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:52.546: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:52.546: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:52.549: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:53.546: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:53.546: INFO: Wrong image for pod: daemon-set-t99m5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:53.546: INFO: Pod daemon-set-t99m5 is not available
+Jun  6 13:37:53.548: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:54.546: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:54.546: INFO: Pod daemon-set-zqvnk is not available
+Jun  6 13:37:54.621: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:55.546: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:55.546: INFO: Pod daemon-set-zqvnk is not available
+Jun  6 13:37:55.548: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:56.546: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:56.549: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:57.547: INFO: Wrong image for pod: daemon-set-f9sl7. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  6 13:37:57.547: INFO: Pod daemon-set-f9sl7 is not available
+Jun  6 13:37:57.550: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:58.621: INFO: Pod daemon-set-pmq7t is not available
+Jun  6 13:37:58.623: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+STEP: Check that daemon pods are still running on every node of the cluster.
+Jun  6 13:37:58.625: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:58.626: INFO: Number of nodes with available pods: 1
+Jun  6 13:37:58.626: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod
+Jun  6 13:37:59.630: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:37:59.632: INFO: Number of nodes with available pods: 1
+Jun  6 13:37:59.632: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod
+Jun  6 13:38:00.629: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:38:00.632: INFO: Number of nodes with available pods: 2
+Jun  6 13:38:00.633: INFO: Number of running nodes: 2, number of available pods: 2
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-4410, will wait for the garbage collector to delete the pods
+Jun  6 13:38:00.698: INFO: Deleting DaemonSet.extensions daemon-set took: 3.957238ms
+Jun  6 13:38:01.098: INFO: Terminating DaemonSet.extensions daemon-set pods took: 400.310402ms
+Jun  6 13:38:14.101: INFO: Number of nodes with available pods: 0
+Jun  6 13:38:14.101: INFO: Number of running nodes: 0, number of available pods: 0
+Jun  6 13:38:14.103: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-4410/daemonsets","resourceVersion":"15950"},"items":null}
+
+Jun  6 13:38:14.105: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-4410/pods","resourceVersion":"15951"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:38:14.111: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-4410" for this suite.
+Jun  6 13:38:20.121: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:38:20.353: INFO: namespace daemonsets-4410 deletion completed in 6.239384029s
+
+• [SLOW TEST:43.080 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:38:20.353: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in gc-6358
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 13:38:20.499: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"591df98d-8860-11e9-bdc9-0231d0af67bc", Controller:(*bool)(0xc002225196), BlockOwnerDeletion:(*bool)(0xc002225197)}}
+Jun  6 13:38:20.503: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"591ccb6b-8860-11e9-bdc9-0231d0af67bc", Controller:(*bool)(0xc002225346), BlockOwnerDeletion:(*bool)(0xc002225347)}}
+Jun  6 13:38:20.527: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"591d6d36-8860-11e9-bdc9-0231d0af67bc", Controller:(*bool)(0xc0026c038e), BlockOwnerDeletion:(*bool)(0xc0026c038f)}}
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:38:25.535: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-6358" for this suite.
+Jun  6 13:38:31.544: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:38:31.753: INFO: namespace gc-6358 deletion completed in 6.216249888s
+
+• [SLOW TEST:11.400 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSS
+------------------------------
+[sig-network] Services 
+  should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:38:31.754: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename services
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in services-7075
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:86
+[It] should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating service endpoint-test2 in namespace services-7075
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-7075 to expose endpoints map[]
+Jun  6 13:38:31.941: INFO: successfully validated that service endpoint-test2 in namespace services-7075 exposes endpoints map[] (3.050713ms elapsed)
+STEP: Creating pod pod1 in namespace services-7075
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-7075 to expose endpoints map[pod1:[80]]
+Jun  6 13:38:33.962: INFO: successfully validated that service endpoint-test2 in namespace services-7075 exposes endpoints map[pod1:[80]] (2.015677325s elapsed)
+STEP: Creating pod pod2 in namespace services-7075
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-7075 to expose endpoints map[pod1:[80] pod2:[80]]
+Jun  6 13:38:36.992: INFO: successfully validated that service endpoint-test2 in namespace services-7075 exposes endpoints map[pod1:[80] pod2:[80]] (3.024134228s elapsed)
+STEP: Deleting pod pod1 in namespace services-7075
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-7075 to expose endpoints map[pod2:[80]]
+Jun  6 13:38:38.022: INFO: successfully validated that service endpoint-test2 in namespace services-7075 exposes endpoints map[pod2:[80]] (1.025295024s elapsed)
+STEP: Deleting pod pod2 in namespace services-7075
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-7075 to expose endpoints map[]
+Jun  6 13:38:38.127: INFO: successfully validated that service endpoint-test2 in namespace services-7075 exposes endpoints map[] (5.172307ms elapsed)
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:38:38.139: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "services-7075" for this suite.
+Jun  6 13:39:00.148: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:39:00.242: INFO: namespace services-7075 deletion completed in 22.100212046s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:91
+
+• [SLOW TEST:28.488 seconds]
+[sig-network] Services
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Watchers 
+  should observe add, update, and delete watch notifications on configmaps [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:39:00.242: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename watch
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in watch-981
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should observe add, update, and delete watch notifications on configmaps [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating a watch on configmaps with label A
+STEP: creating a watch on configmaps with label B
+STEP: creating a watch on configmaps with label A or B
+STEP: creating a configmap with label A and ensuring the correct watchers observe the notification
+Jun  6 13:39:00.372: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-a,UID:70e38785-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16162,Generation:0,CreationTimestamp:2019-06-06 13:39:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun  6 13:39:00.373: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-a,UID:70e38785-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16162,Generation:0,CreationTimestamp:2019-06-06 13:39:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+STEP: modifying configmap A and ensuring the correct watchers observe the notification
+Jun  6 13:39:10.377: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-a,UID:70e38785-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16178,Generation:0,CreationTimestamp:2019-06-06 13:39:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+Jun  6 13:39:10.377: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-a,UID:70e38785-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16178,Generation:0,CreationTimestamp:2019-06-06 13:39:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying configmap A again and ensuring the correct watchers observe the notification
+Jun  6 13:39:20.382: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-a,UID:70e38785-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16193,Generation:0,CreationTimestamp:2019-06-06 13:39:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun  6 13:39:20.383: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-a,UID:70e38785-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16193,Generation:0,CreationTimestamp:2019-06-06 13:39:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+STEP: deleting configmap A and ensuring the correct watchers observe the notification
+Jun  6 13:39:30.387: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-a,UID:70e38785-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16209,Generation:0,CreationTimestamp:2019-06-06 13:39:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun  6 13:39:30.387: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-a,UID:70e38785-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16209,Generation:0,CreationTimestamp:2019-06-06 13:39:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+STEP: creating a configmap with label B and ensuring the correct watchers observe the notification
+Jun  6 13:39:40.391: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-b,UID:88bdbb7c-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16225,Generation:0,CreationTimestamp:2019-06-06 13:39:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun  6 13:39:40.391: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-b,UID:88bdbb7c-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16225,Generation:0,CreationTimestamp:2019-06-06 13:39:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+STEP: deleting configmap B and ensuring the correct watchers observe the notification
+Jun  6 13:39:50.396: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-b,UID:88bdbb7c-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16242,Generation:0,CreationTimestamp:2019-06-06 13:39:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun  6 13:39:50.396: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-981,SelfLink:/api/v1/namespaces/watch-981/configmaps/e2e-watch-test-configmap-b,UID:88bdbb7c-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16242,Generation:0,CreationTimestamp:2019-06-06 13:39:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:40:00.396: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-981" for this suite.
+Jun  6 13:40:06.406: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:40:06.550: INFO: namespace watch-981 deletion completed in 6.151950074s
+
+• [SLOW TEST:66.308 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should observe add, update, and delete watch notifications on configmaps [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl logs 
+  should be able to retrieve and filter logs  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:40:06.551: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-5950
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[BeforeEach] [k8s.io] Kubectl logs
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1190
+STEP: creating an rc
+Jun  6 13:40:06.676: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-5950'
+Jun  6 13:40:06.806: INFO: stderr: ""
+Jun  6 13:40:06.806: INFO: stdout: "replicationcontroller/redis-master created\n"
+[It] should be able to retrieve and filter logs  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Waiting for Redis master to start.
+Jun  6 13:40:07.809: INFO: Selector matched 1 pod for map[app:redis]
+Jun  6 13:40:07.809: INFO: Found 0 / 1
+Jun  6 13:40:08.809: INFO: Selector matched 1 pod for map[app:redis]
+Jun  6 13:40:08.809: INFO: Found 0 / 1
+Jun  6 13:40:09.809: INFO: Selector matched 1 pod for map[app:redis]
+Jun  6 13:40:09.809: INFO: Found 1 / 1
+Jun  6 13:40:09.809: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+Jun  6 13:40:09.811: INFO: Selector matched 1 pod for map[app:redis]
+Jun  6 13:40:09.811: INFO: ForEach: Found 1 pod from the filter. Now looping through them.
+STEP: checking for matching strings
+Jun  6 13:40:09.811: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 logs redis-master-rtw4f redis-master --namespace=kubectl-5950'
+Jun  6 13:40:09.884: INFO: stderr: ""
+Jun  6 13:40:09.884: INFO: stdout: "                _._                                                  \n           _.-``__ ''-._                                             \n      _.-``    `.  `_.  ''-._           Redis 3.2.12 (35a5711f/0) 64 bit\n  .-`` .-```.  ```\\/    _.,_ ''-._                                   \n (    '      ,       .-`  | `,    )     Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'|     Port: 6379\n |    `-._   `._    /     _.-'    |     PID: 1\n  `-._    `-._  `-./  _.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |           http://redis.io        \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |                                  \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n      `-._    `-.__.-'    _.-'                                       \n          `-._        _.-'                                           \n              `-.__.-'                                               \n\n1:M 06 Jun 13:40:07.837 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 06 Jun 13:40:07.837 # Server started, Redis version 3.2.12\n1:M 06 Jun 13:40:07.837 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 06 Jun 13:40:07.837 * The server is now ready to accept connections on port 6379\n"
+STEP: limiting log lines
+Jun  6 13:40:09.884: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 log redis-master-rtw4f redis-master --namespace=kubectl-5950 --tail=1'
+Jun  6 13:40:09.953: INFO: stderr: ""
+Jun  6 13:40:09.953: INFO: stdout: "1:M 06 Jun 13:40:07.837 * The server is now ready to accept connections on port 6379\n"
+STEP: limiting log bytes
+Jun  6 13:40:09.953: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 log redis-master-rtw4f redis-master --namespace=kubectl-5950 --limit-bytes=1'
+Jun  6 13:40:10.035: INFO: stderr: ""
+Jun  6 13:40:10.035: INFO: stdout: " "
+STEP: exposing timestamps
+Jun  6 13:40:10.035: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 log redis-master-rtw4f redis-master --namespace=kubectl-5950 --tail=1 --timestamps'
+Jun  6 13:40:10.109: INFO: stderr: ""
+Jun  6 13:40:10.109: INFO: stdout: "2019-06-06T13:40:07.838791312Z 1:M 06 Jun 13:40:07.837 * The server is now ready to accept connections on port 6379\n"
+STEP: restricting to a time range
+Jun  6 13:40:12.609: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 log redis-master-rtw4f redis-master --namespace=kubectl-5950 --since=1s'
+Jun  6 13:40:12.684: INFO: stderr: ""
+Jun  6 13:40:12.684: INFO: stdout: ""
+Jun  6 13:40:12.684: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 log redis-master-rtw4f redis-master --namespace=kubectl-5950 --since=24h'
+Jun  6 13:40:12.752: INFO: stderr: ""
+Jun  6 13:40:12.752: INFO: stdout: "                _._                                                  \n           _.-``__ ''-._                                             \n      _.-``    `.  `_.  ''-._           Redis 3.2.12 (35a5711f/0) 64 bit\n  .-`` .-```.  ```\\/    _.,_ ''-._                                   \n (    '      ,       .-`  | `,    )     Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'|     Port: 6379\n |    `-._   `._    /     _.-'    |     PID: 1\n  `-._    `-._  `-./  _.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |           http://redis.io        \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |                                  \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n      `-._    `-.__.-'    _.-'                                       \n          `-._        _.-'                                           \n              `-.__.-'                                               \n\n1:M 06 Jun 13:40:07.837 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 06 Jun 13:40:07.837 # Server started, Redis version 3.2.12\n1:M 06 Jun 13:40:07.837 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 06 Jun 13:40:07.837 * The server is now ready to accept connections on port 6379\n"
+[AfterEach] [k8s.io] Kubectl logs
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1196
+STEP: using delete to clean up resources
+Jun  6 13:40:12.753: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete --grace-period=0 --force -f - --namespace=kubectl-5950'
+Jun  6 13:40:12.822: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun  6 13:40:12.822: INFO: stdout: "replicationcontroller \"redis-master\" force deleted\n"
+Jun  6 13:40:12.822: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get rc,svc -l name=nginx --no-headers --namespace=kubectl-5950'
+Jun  6 13:40:12.897: INFO: stderr: "No resources found.\n"
+Jun  6 13:40:12.897: INFO: stdout: ""
+Jun  6 13:40:12.897: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pods -l name=nginx --namespace=kubectl-5950 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun  6 13:40:12.967: INFO: stderr: ""
+Jun  6 13:40:12.967: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:40:12.967: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-5950" for this suite.
+Jun  6 13:40:34.976: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:40:35.071: INFO: namespace kubectl-5950 deletion completed in 22.101406542s
+
+• [SLOW TEST:28.519 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl logs
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should be able to retrieve and filter logs  [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSS
+------------------------------
+[sig-apps] Deployment 
+  deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:40:35.071: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename deployment
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in deployment-1786
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 13:40:35.204: INFO: Pod name cleanup-pod: Found 0 pods out of 1
+Jun  6 13:40:40.206: INFO: Pod name cleanup-pod: Found 1 pod out of 1
+STEP: ensuring each pod is running
+Jun  6 13:40:40.206: INFO: Creating deployment test-cleanup-deployment
+STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+Jun  6 13:40:42.237: INFO: Deployment "test-cleanup-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment,GenerateName:,Namespace:deployment-1786,SelfLink:/apis/apps/v1/namespaces/deployment-1786/deployments/test-cleanup-deployment,UID:ac65bc83-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16407,Generation:1,CreationTimestamp:2019-06-06 13:40:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 1,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-06-06 13:40:40 +0000 UTC 2019-06-06 13:40:40 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-06 13:40:41 +0000 UTC 2019-06-06 13:40:40 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-cleanup-deployment-55cbfbc8f5" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},}
+
+Jun  6 13:40:42.239: INFO: New ReplicaSet "test-cleanup-deployment-55cbfbc8f5" of Deployment "test-cleanup-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment-55cbfbc8f5,GenerateName:,Namespace:deployment-1786,SelfLink:/apis/apps/v1/namespaces/deployment-1786/replicasets/test-cleanup-deployment-55cbfbc8f5,UID:ac66be16-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16397,Generation:1,CreationTimestamp:2019-06-06 13:40:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 55cbfbc8f5,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-cleanup-deployment ac65bc83-8860-11e9-bdc9-0231d0af67bc 0xc002225a17 0xc002225a18}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod-template-hash: 55cbfbc8f5,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 55cbfbc8f5,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},}
+Jun  6 13:40:42.241: INFO: Pod "test-cleanup-deployment-55cbfbc8f5-g7ld6" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment-55cbfbc8f5-g7ld6,GenerateName:test-cleanup-deployment-55cbfbc8f5-,Namespace:deployment-1786,SelfLink:/api/v1/namespaces/deployment-1786/pods/test-cleanup-deployment-55cbfbc8f5-g7ld6,UID:ac6764a6-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16396,Generation:0,CreationTimestamp:2019-06-06 13:40:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 55cbfbc8f5,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.2.128/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet test-cleanup-deployment-55cbfbc8f5 ac66be16-8860-11e9-bdc9-0231d0af67bc 0xc002869017 0xc002869018}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-mmcjd {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-mmcjd,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [{default-token-mmcjd true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002869080} {node.kubernetes.io/unreachable Exists  NoExecute 0xc0028690a0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:40:40 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:40:41 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:40:41 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:40:40 +0000 UTC  }],Message:,Reason:,HostIP:172.16.66.200,PodIP:100.96.2.128,StartTime:2019-06-06 13:40:40 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-06-06 13:40:41 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://2e4930d82176d064f448a48a88adf08aa39fc075ce52b693cfdb3bc00239318f}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:40:42.241: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-1786" for this suite.
+Jun  6 13:40:48.250: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:40:48.322: INFO: namespace deployment-1786 deletion completed in 6.078867754s
+
+• [SLOW TEST:13.251 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:40:48.322: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-7189
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0777 on node default medium
+Jun  6 13:40:48.457: INFO: Waiting up to 5m0s for pod "pod-b14f72eb-8860-11e9-b613-8a9bc7c14a19" in namespace "emptydir-7189" to be "success or failure"
+Jun  6 13:40:48.463: INFO: Pod "pod-b14f72eb-8860-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.781478ms
+Jun  6 13:40:50.466: INFO: Pod "pod-b14f72eb-8860-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.008397944s
+Jun  6 13:40:52.468: INFO: Pod "pod-b14f72eb-8860-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.011187605s
+STEP: Saw pod success
+Jun  6 13:40:52.469: INFO: Pod "pod-b14f72eb-8860-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:40:52.471: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-b14f72eb-8860-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 13:40:52.483: INFO: Waiting for pod pod-b14f72eb-8860-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:40:52.486: INFO: Pod pod-b14f72eb-8860-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:40:52.486: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-7189" for this suite.
+Jun  6 13:40:58.495: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:40:58.650: INFO: namespace emptydir-7189 deletion completed in 6.161682849s
+
+• [SLOW TEST:10.327 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with configmap pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:40:58.650: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename subpath
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in subpath-5923
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with configmap pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod pod-subpath-test-configmap-lffg
+STEP: Creating a pod to test atomic-volume-subpath
+Jun  6 13:40:58.786: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-lffg" in namespace "subpath-5923" to be "success or failure"
+Jun  6 13:40:58.792: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Pending", Reason="", readiness=false. Elapsed: 6.134597ms
+Jun  6 13:41:00.820: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Pending", Reason="", readiness=false. Elapsed: 2.034004256s
+Jun  6 13:41:02.823: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Running", Reason="", readiness=true. Elapsed: 4.036679356s
+Jun  6 13:41:04.825: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Running", Reason="", readiness=true. Elapsed: 6.039251204s
+Jun  6 13:41:06.828: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Running", Reason="", readiness=true. Elapsed: 8.041993138s
+Jun  6 13:41:08.831: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Running", Reason="", readiness=true. Elapsed: 10.044398334s
+Jun  6 13:41:10.834: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Running", Reason="", readiness=true. Elapsed: 12.047349981s
+Jun  6 13:41:12.836: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Running", Reason="", readiness=true. Elapsed: 14.049829508s
+Jun  6 13:41:14.839: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Running", Reason="", readiness=true. Elapsed: 16.052674588s
+Jun  6 13:41:16.842: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Running", Reason="", readiness=true. Elapsed: 18.05539696s
+Jun  6 13:41:18.844: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Running", Reason="", readiness=true. Elapsed: 20.05812834s
+Jun  6 13:41:20.847: INFO: Pod "pod-subpath-test-configmap-lffg": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.06051146s
+STEP: Saw pod success
+Jun  6 13:41:20.847: INFO: Pod "pod-subpath-test-configmap-lffg" satisfied condition "success or failure"
+Jun  6 13:41:20.849: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-subpath-test-configmap-lffg container test-container-subpath-configmap-lffg: 
+STEP: delete the pod
+Jun  6 13:41:20.863: INFO: Waiting for pod pod-subpath-test-configmap-lffg to disappear
+Jun  6 13:41:20.870: INFO: Pod pod-subpath-test-configmap-lffg no longer exists
+STEP: Deleting pod pod-subpath-test-configmap-lffg
+Jun  6 13:41:20.870: INFO: Deleting pod "pod-subpath-test-configmap-lffg" in namespace "subpath-5923"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:41:20.872: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "subpath-5923" for this suite.
+Jun  6 13:41:26.921: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:41:26.990: INFO: namespace subpath-5923 deletion completed in 6.115733463s
+
+• [SLOW TEST:28.340 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with configmap pod [LinuxOnly] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with projected pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:41:26.991: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename subpath
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in subpath-8602
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with projected pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod pod-subpath-test-projected-k9cv
+STEP: Creating a pod to test atomic-volume-subpath
+Jun  6 13:41:27.128: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-k9cv" in namespace "subpath-8602" to be "success or failure"
+Jun  6 13:41:27.136: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Pending", Reason="", readiness=false. Elapsed: 7.910601ms
+Jun  6 13:41:29.220: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Running", Reason="", readiness=true. Elapsed: 2.091794043s
+Jun  6 13:41:31.223: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Running", Reason="", readiness=true. Elapsed: 4.094352359s
+Jun  6 13:41:33.225: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Running", Reason="", readiness=true. Elapsed: 6.096815416s
+Jun  6 13:41:35.228: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Running", Reason="", readiness=true. Elapsed: 8.099410363s
+Jun  6 13:41:37.231: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Running", Reason="", readiness=true. Elapsed: 10.102250241s
+Jun  6 13:41:39.233: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Running", Reason="", readiness=true. Elapsed: 12.104483987s
+Jun  6 13:41:41.236: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Running", Reason="", readiness=true. Elapsed: 14.107126678s
+Jun  6 13:41:43.238: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Running", Reason="", readiness=true. Elapsed: 16.109892304s
+Jun  6 13:41:45.241: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Running", Reason="", readiness=true. Elapsed: 18.112814141s
+Jun  6 13:41:47.244: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Running", Reason="", readiness=true. Elapsed: 20.115682953s
+Jun  6 13:41:49.246: INFO: Pod "pod-subpath-test-projected-k9cv": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.117892454s
+STEP: Saw pod success
+Jun  6 13:41:49.246: INFO: Pod "pod-subpath-test-projected-k9cv" satisfied condition "success or failure"
+Jun  6 13:41:49.248: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-subpath-test-projected-k9cv container test-container-subpath-projected-k9cv: 
+STEP: delete the pod
+Jun  6 13:41:49.264: INFO: Waiting for pod pod-subpath-test-projected-k9cv to disappear
+Jun  6 13:41:49.266: INFO: Pod pod-subpath-test-projected-k9cv no longer exists
+STEP: Deleting pod pod-subpath-test-projected-k9cv
+Jun  6 13:41:49.266: INFO: Deleting pod "pod-subpath-test-projected-k9cv" in namespace "subpath-8602"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:41:49.269: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "subpath-8602" for this suite.
+Jun  6 13:41:55.279: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:41:55.347: INFO: namespace subpath-8602 deletion completed in 6.075936548s
+
+• [SLOW TEST:28.357 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with projected pod [LinuxOnly] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:41:55.349: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-3170
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating projection with secret that has name projected-secret-test-map-d941fc5e-8860-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume secrets
+Jun  6 13:41:55.480: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-d9426089-8860-11e9-b613-8a9bc7c14a19" in namespace "projected-3170" to be "success or failure"
+Jun  6 13:41:55.486: INFO: Pod "pod-projected-secrets-d9426089-8860-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.799537ms
+Jun  6 13:41:57.489: INFO: Pod "pod-projected-secrets-d9426089-8860-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008714999s
+STEP: Saw pod success
+Jun  6 13:41:57.489: INFO: Pod "pod-projected-secrets-d9426089-8860-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:41:57.491: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-secrets-d9426089-8860-11e9-b613-8a9bc7c14a19 container projected-secret-volume-test: 
+STEP: delete the pod
+Jun  6 13:41:57.504: INFO: Waiting for pod pod-projected-secrets-d9426089-8860-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:41:57.506: INFO: Pod pod-projected-secrets-d9426089-8860-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:41:57.506: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-3170" for this suite.
+Jun  6 13:42:03.517: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:42:03.592: INFO: namespace projected-3170 deletion completed in 6.083598976s
+
+• [SLOW TEST:8.243 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Deployment 
+  RollingUpdateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:42:03.592: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename deployment
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in deployment-9344
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] RollingUpdateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 13:42:03.718: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted)
+Jun  6 13:42:03.725: INFO: Pod name sample-pod: Found 0 pods out of 1
+Jun  6 13:42:08.727: INFO: Pod name sample-pod: Found 1 pod out of 1
+STEP: ensuring each pod is running
+Jun  6 13:42:08.727: INFO: Creating deployment "test-rolling-update-deployment"
+Jun  6 13:42:08.731: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision after the one held by the adopted replica set "test-rolling-update-controller"
+Jun  6 13:42:08.739: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created
+Jun  6 13:42:10.743: INFO: Ensuring status for deployment "test-rolling-update-deployment" is as expected
+Jun  6 13:42:10.750: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted)
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+Jun  6 13:42:10.755: INFO: Deployment "test-rolling-update-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment,GenerateName:,Namespace:deployment-9344,SelfLink:/apis/apps/v1/namespaces/deployment-9344/deployments/test-rolling-update-deployment,UID:e128ab7b-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16738,Generation:1,CreationTimestamp:2019-06-06 13:42:08 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-06-06 13:42:08 +0000 UTC 2019-06-06 13:42:08 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-06 13:42:10 +0000 UTC 2019-06-06 13:42:08 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rolling-update-deployment-67599b4d9" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},}
+
+Jun  6 13:42:10.757: INFO: New ReplicaSet "test-rolling-update-deployment-67599b4d9" of Deployment "test-rolling-update-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-67599b4d9,GenerateName:,Namespace:deployment-9344,SelfLink:/apis/apps/v1/namespaces/deployment-9344/replicasets/test-rolling-update-deployment-67599b4d9,UID:e129d626-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16728,Generation:1,CreationTimestamp:2019-06-06 13:42:08 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 67599b4d9,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment e128ab7b-8860-11e9-bdc9-0231d0af67bc 0xc002e6b440 0xc002e6b441}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 67599b4d9,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 67599b4d9,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},}
+Jun  6 13:42:10.757: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment":
+Jun  6 13:42:10.757: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-controller,GenerateName:,Namespace:deployment-9344,SelfLink:/apis/apps/v1/namespaces/deployment-9344/replicasets/test-rolling-update-controller,UID:de2c51cf-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16737,Generation:2,CreationTimestamp:2019-06-06 13:42:03 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305832,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment e128ab7b-8860-11e9-bdc9-0231d0af67bc 0xc002e6b377 0xc002e6b378}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun  6 13:42:10.760: INFO: Pod "test-rolling-update-deployment-67599b4d9-8tmxh" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-67599b4d9-8tmxh,GenerateName:test-rolling-update-deployment-67599b4d9-,Namespace:deployment-9344,SelfLink:/api/v1/namespaces/deployment-9344/pods/test-rolling-update-deployment-67599b4d9-8tmxh,UID:e12a34e1-8860-11e9-bdc9-0231d0af67bc,ResourceVersion:16727,Generation:0,CreationTimestamp:2019-06-06 13:42:08 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 67599b4d9,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.2.134/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet test-rolling-update-deployment-67599b4d9 e129d626-8860-11e9-bdc9-0231d0af67bc 0xc002e6bce0 0xc002e6bce1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-lznq4 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-lznq4,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [{default-token-lznq4 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002e6bd40} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002e6bd60}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:42:08 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:42:10 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:42:10 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:42:08 +0000 UTC  }],Message:,Reason:,HostIP:172.16.66.200,PodIP:100.96.2.134,StartTime:2019-06-06 13:42:08 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-06-06 13:42:10 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://1a9b6778bcbd10e6e77bd74524c4c1a6892d3c4c5872af6b68c6fe59a66cd202}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:42:10.760: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-9344" for this suite.
+Jun  6 13:42:16.770: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:42:16.861: INFO: namespace deployment-9344 deletion completed in 6.098529307s
+
+• [SLOW TEST:13.269 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  RollingUpdateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a read only busybox container 
+  should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:42:16.862: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubelet-test-1706
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:42:19.005: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-1706" for this suite.
+Jun  6 13:42:57.015: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:42:57.082: INFO: namespace kubelet-test-1706 deletion completed in 38.075099213s
+
+• [SLOW TEST:40.220 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when scheduling a read only busybox container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:187
+    should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
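+
+A minimal hand-run sketch of the same read-only root filesystem check, using a throwaway busybox pod (the manifest and names are illustrative):
+
+```bash
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: readonly-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: busybox
+    image: busybox
+    command: ["/bin/sh", "-c", "echo test > /attempt"]
+    securityContext:
+      readOnlyRootFilesystem: true
+EOF
+# The write is rejected ("Read-only file system"):
+kubectl logs readonly-demo
+```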
+S
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:42:57.083: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-5574
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-test-volume-fe15c8d0-8860-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume configMaps
+Jun  6 13:42:57.267: INFO: Waiting up to 5m0s for pod "pod-configmaps-fe163574-8860-11e9-b613-8a9bc7c14a19" in namespace "configmap-5574" to be "success or failure"
+Jun  6 13:42:57.271: INFO: Pod "pod-configmaps-fe163574-8860-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.159759ms
+Jun  6 13:42:59.274: INFO: Pod "pod-configmaps-fe163574-8860-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.00678351s
+Jun  6 13:43:01.277: INFO: Pod "pod-configmaps-fe163574-8860-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009591609s
+STEP: Saw pod success
+Jun  6 13:43:01.277: INFO: Pod "pod-configmaps-fe163574-8860-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:43:01.279: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-configmaps-fe163574-8860-11e9-b613-8a9bc7c14a19 container configmap-volume-test: 
+STEP: delete the pod
+Jun  6 13:43:01.293: INFO: Waiting for pod pod-configmaps-fe163574-8860-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:43:01.294: INFO: Pod pod-configmaps-fe163574-8860-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:43:01.294: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-5574" for this suite.
+Jun  6 13:43:07.303: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:43:07.374: INFO: namespace configmap-5574 deletion completed in 6.077492798s
+
+• [SLOW TEST:10.291 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
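+
+The ConfigMap-as-volume pattern this test exercises, as a hand-run sketch (names and key are placeholders):
+
+```bash
+kubectl create configmap demo-cm --from-literal=data-1=value-1
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cm-volume-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test
+    image: busybox
+    command: ["cat", "/etc/config/data-1"]
+    volumeMounts:
+    - name: config
+      mountPath: /etc/config
+  volumes:
+  - name: config
+    configMap:
+      name: demo-cm
+EOF
+kubectl logs cm-volume-demo   # prints "value-1"
+```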
+SSSS
+------------------------------
+[sig-storage] Secrets 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:43:07.374: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in secrets-6832
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name s-test-opt-del-043097a4-8861-11e9-b613-8a9bc7c14a19
+STEP: Creating secret with name s-test-opt-upd-043097ec-8861-11e9-b613-8a9bc7c14a19
+STEP: Creating the pod
+STEP: Deleting secret s-test-opt-del-043097a4-8861-11e9-b613-8a9bc7c14a19
+STEP: Updating secret s-test-opt-upd-043097ec-8861-11e9-b613-8a9bc7c14a19
+STEP: Creating secret with name s-test-opt-create-04309826-8861-11e9-b613-8a9bc7c14a19
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:44:15.935: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-6832" for this suite.
+Jun  6 13:44:37.944: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:44:38.097: INFO: namespace secrets-6832 deletion completed in 22.15949638s
+
+• [SLOW TEST:90.723 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
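+
+A rough way to observe an optional secret volume update by hand (names are placeholders); the kubelet refreshes mounted secrets on its sync period, so the final read can lag by up to a minute, which is why this test runs long:
+
+```bash
+kubectl create secret generic s-opt-demo --from-literal=key=value-1
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: secret-watch-demo
+spec:
+  containers:
+  - name: watcher
+    image: busybox
+    command: ["sh", "-c", "sleep 3600"]
+    volumeMounts:
+    - name: secret
+      mountPath: /etc/secret
+  volumes:
+  - name: secret
+    secret:
+      secretName: s-opt-demo
+      optional: true
+EOF
+kubectl wait --for=condition=Ready pod/secret-watch-demo
+# Update the secret in place (newer kubectl spells this --dry-run=client):
+kubectl create secret generic s-opt-demo --from-literal=key=value-2 \
+  --dry-run -o yaml | kubectl apply -f -
+kubectl exec secret-watch-demo -- cat /etc/secret/key   # eventually "value-2"
+```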
+SSS
+------------------------------
+[sig-network] DNS 
+  should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] DNS
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:44:38.098: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename dns
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in dns-7790
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-7790.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.dns-7790.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-7790.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-7790.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-7790.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-7790.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done
+
+STEP: creating a pod to probe /etc/hosts
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Jun  6 13:44:48.261: INFO: DNS probes using dns-7790/dns-test-3a439d81-8861-11e9-b613-8a9bc7c14a19 succeeded
+
+STEP: deleting the pod
+[AfterEach] [sig-network] DNS
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:44:48.270: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "dns-7790" for this suite.
+Jun  6 13:44:54.279: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:44:54.351: INFO: namespace dns-7790 deletion completed in 6.078192575s
+
+• [SLOW TEST:16.253 seconds]
+[sig-network] DNS
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
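+
+The kubelet-managed /etc/hosts entries that the probers above look for can be inspected directly (the suite itself runs the getent/dig loops shown in the commands above inside debian-based images); a sketch with placeholder names:
+
+```bash
+kubectl run hosts-demo --image=busybox --restart=Never -- sleep 3600
+kubectl wait --for=condition=Ready pod/hosts-demo
+# The kubelet writes the pod's own hostname/IP entry into /etc/hosts:
+kubectl exec hosts-demo -- cat /etc/hosts
+```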
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:44:54.351: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pods-3505
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+STEP: setting up watch
+STEP: submitting the pod to kubernetes
+Jun  6 13:44:54.484: INFO: observed the pod list
+STEP: verifying the pod is in kubernetes
+STEP: verifying pod creation was observed
+STEP: deleting the pod gracefully
+STEP: verifying the kubelet observed the termination notice
+STEP: verifying pod deletion was observed
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:45:05.832: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-3505" for this suite.
+Jun  6 13:45:11.841: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:45:12.058: INFO: namespace pods-3505 deletion completed in 6.22414268s
+
+• [SLOW TEST:17.708 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
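+
+A hand-run version of the graceful submit/watch/remove cycle above (image and name are illustrative):
+
+```bash
+kubectl run pause-demo --image=k8s.gcr.io/pause:3.1 --restart=Never
+# Background watch shows the transitions: Running -> Terminating -> deleted
+kubectl get pod pause-demo -w &
+WATCH_PID=$!
+kubectl delete pod pause-demo --grace-period=30
+kill "$WATCH_PID"
+```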
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:45:12.059: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pods-9102
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 13:45:16.211: INFO: Waiting up to 5m0s for pod "client-envvars-50e7827a-8861-11e9-b613-8a9bc7c14a19" in namespace "pods-9102" to be "success or failure"
+Jun  6 13:45:16.213: INFO: Pod "client-envvars-50e7827a-8861-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.342692ms
+Jun  6 13:45:18.215: INFO: Pod "client-envvars-50e7827a-8861-11e9-b613-8a9bc7c14a19": Phase="Running", Reason="", readiness=true. Elapsed: 2.004330119s
+Jun  6 13:45:20.220: INFO: Pod "client-envvars-50e7827a-8861-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009380496s
+STEP: Saw pod success
+Jun  6 13:45:20.220: INFO: Pod "client-envvars-50e7827a-8861-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:45:20.222: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod client-envvars-50e7827a-8861-11e9-b613-8a9bc7c14a19 container env3cont: 
+STEP: delete the pod
+Jun  6 13:45:20.241: INFO: Waiting for pod client-envvars-50e7827a-8861-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:45:20.243: INFO: Pod client-envvars-50e7827a-8861-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:45:20.243: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-9102" for this suite.
+Jun  6 13:46:10.253: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:46:10.350: INFO: namespace pods-9102 deletion completed in 50.103486204s
+
+• [SLOW TEST:58.291 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
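+
+The service environment variables this test asserts (FOOSERVICE_SERVICE_HOST and friends) are only injected into pods created after the service exists; a sketch with placeholder names:
+
+```bash
+kubectl run hostname-server --image=nginx --restart=Never --port=80
+kubectl expose pod hostname-server --name=fooservice --port=8765 --target-port=80
+# This pod starts after the service, so it sees the injected variables:
+kubectl run env-demo --image=busybox --restart=Never -- \
+  sh -c 'printenv | grep FOOSERVICE'
+kubectl logs env-demo   # FOOSERVICE_SERVICE_HOST, FOOSERVICE_SERVICE_PORT, ...
+```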
+SSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:46:10.350: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pod-network-test-7453
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Performing setup for networking test in namespace pod-network-test-7453
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun  6 13:46:10.475: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun  6 13:46:30.529: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://100.96.2.141:8080/dial?request=hostName&protocol=http&host=100.96.1.39&port=8080&tries=1'] Namespace:pod-network-test-7453 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:46:30.529: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:46:30.778: INFO: Waiting for endpoints: map[]
+Jun  6 13:46:30.780: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://100.96.2.141:8080/dial?request=hostName&protocol=http&host=100.96.2.140&port=8080&tries=1'] Namespace:pod-network-test-7453 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:46:30.780: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:46:30.964: INFO: Waiting for endpoints: map[]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:46:30.964: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-7453" for this suite.
+Jun  6 13:46:52.973: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:46:53.050: INFO: namespace pod-network-test-7453 deletion completed in 22.084284346s
+
+• [SLOW TEST:42.701 seconds]
+[sig-network] Networking
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
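+
+A simplified stand-in for the intra-pod HTTP check (the suite uses its own netserver pods with a /dial endpoint, as the curl lines above show; plain nginx and busybox wget are substituted here):
+
+```bash
+kubectl run web --image=nginx --restart=Never
+kubectl run client --image=busybox --restart=Never -- sleep 3600
+kubectl wait --for=condition=Ready pod/web pod/client
+WEB_IP=$(kubectl get pod web -o jsonpath='{.status.podIP}')
+# Pod-to-pod HTTP across the cluster network, no service in between:
+kubectl exec client -- wget -qO- "http://${WEB_IP}:80/" | head -n 3
+```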
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] [sig-node] Events 
+  should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] [sig-node] Events
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:46:53.051: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename events
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in events-7685
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: retrieving the pod
+Jun  6 13:46:55.195: INFO: &Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:send-events-8ab425f0-8861-11e9-b613-8a9bc7c14a19,GenerateName:,Namespace:events-7685,SelfLink:/api/v1/namespaces/events-7685/pods/send-events-8ab425f0-8861-11e9-b613-8a9bc7c14a19,UID:8ab486dd-8861-11e9-bdc9-0231d0af67bc,ResourceVersion:17508,Generation:0,CreationTimestamp:2019-06-06 13:46:53 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: foo,time: 177809833,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.2.142/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-gqhfn {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-gqhfn,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{p gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1 [] []  [{ 0 80 TCP }] [] [] {map[] map[]} [{default-token-gqhfn true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc003174190} {node.kubernetes.io/unreachable Exists  NoExecute 0xc0031741b0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:46:53 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:46:54 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:46:54 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 13:46:53 +0000 UTC  }],Message:,Reason:,HostIP:172.16.66.200,PodIP:100.96.2.142,StartTime:2019-06-06 13:46:53 +0000 UTC,ContainerStatuses:[{p {nil ContainerStateRunning{StartedAt:2019-06-06 13:46:54 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1 docker-pullable://gcr.io/kubernetes-e2e-test-images/serve-hostname@sha256:bab70473a6d8ef65a22625dc9a1b0f0452e811530fdbe77e4408523460177ff1 docker://fd061747901e0e3c5ac6b5b899fd2a4b4483cd0e5c2a0fdd2f2803f8f06ac6b4}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+
+STEP: checking for scheduler event about the pod
+Jun  6 13:46:57.198: INFO: Saw scheduler event for our pod.
+STEP: checking for kubelet event about the pod
+Jun  6 13:46:59.200: INFO: Saw kubelet event for our pod.
+STEP: deleting the pod
+[AfterEach] [k8s.io] [sig-node] Events
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:46:59.205: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "events-7685" for this suite.
+Jun  6 13:47:37.219: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:47:37.320: INFO: namespace events-7685 deletion completed in 38.113312999s
+
+• [SLOW TEST:44.270 seconds]
+[k8s.io] [sig-node] Events
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
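+
+The scheduler and kubelet events this test waits for can be listed directly; pod name and namespace below are placeholders:
+
+```bash
+kubectl get events --namespace default \
+  --field-selector involvedObject.kind=Pod,involvedObject.name=my-pod
+# Typical output includes "Scheduled" from default-scheduler and
+# "Pulled"/"Created"/"Started" from the kubelet.
+```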
+SSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl version 
+  should check is all data is printed  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:47:37.321: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-5311
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should check is all data is printed  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 13:47:37.445: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 version'
+Jun  6 13:47:37.500: INFO: stderr: ""
+Jun  6 13:47:37.500: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"14\", GitVersion:\"v1.14.2\", GitCommit:\"66049e3b21efe110454d67df4fa62b08ea79a19b\", GitTreeState:\"clean\", BuildDate:\"2019-05-16T16:23:09Z\", GoVersion:\"go1.12.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nServer Version: version.Info{Major:\"1\", Minor:\"14\", GitVersion:\"v1.14.2\", GitCommit:\"66049e3b21efe110454d67df4fa62b08ea79a19b\", GitTreeState:\"clean\", BuildDate:\"2019-05-16T16:14:56Z\", GoVersion:\"go1.12.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:47:37.500: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-5311" for this suite.
+Jun  6 13:47:43.510: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:47:43.575: INFO: namespace kubectl-5311 deletion completed in 6.072156957s
+
+• [SLOW TEST:6.254 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl version
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should check is all data is printed  [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
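+
+The equivalent hand-run check for the client and server version data captured in the stdout above:
+
+```bash
+kubectl version            # human-readable client and server version.Info structs
+kubectl version -o json    # the same data as JSON, handy for scripting
+```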
+S
+------------------------------
+[k8s.io] [sig-node] PreStop 
+  should call prestop when killing a pod  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] [sig-node] PreStop
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:47:43.576: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename prestop
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in prestop-6669
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] [sig-node] PreStop
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pre_stop.go:167
+[It] should call prestop when killing a pod  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating server pod server in namespace prestop-6669
+STEP: Waiting for pods to come up.
+STEP: Creating tester pod tester in namespace prestop-6669
+STEP: Deleting pre-stop pod
+Jun  6 13:47:54.855: INFO: Saw: {
+	"Hostname": "server",
+	"Sent": null,
+	"Received": {
+		"prestop": 1
+	},
+	"Errors": null,
+	"Log": [
+		"default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.",
+		"default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up."
+	],
+	"StillContactingPeers": true
+}
+STEP: Deleting the server pod
+[AfterEach] [k8s.io] [sig-node] PreStop
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:47:54.860: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "prestop-6669" for this suite.
+Jun  6 13:48:32.871: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:48:32.942: INFO: namespace prestop-6669 deletion completed in 38.079985819s
+
+• [SLOW TEST:49.366 seconds]
+[k8s.io] [sig-node] PreStop
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should call prestop when killing a pod  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
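+
+A sketch of the preStop ordering with a trivial hook (the suite's hook contacts a server pod, as the "prestop" counter above shows; a plain sleep is substituted here):
+
+```bash
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: prestop-demo
+spec:
+  containers:
+  - name: nginx
+    image: nginx
+    lifecycle:
+      preStop:
+        exec:
+          command: ["/bin/sh", "-c", "sleep 5"]
+EOF
+# Deletion blocks on the preStop hook before SIGTERM is sent, all within
+# terminationGracePeriodSeconds (30s by default):
+time kubectl delete pod prestop-demo
+```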
+SSSS
+------------------------------
+[sig-node] Downward API 
+  should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:48:32.942: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-3046
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward api env vars
+Jun  6 13:48:33.074: INFO: Waiting up to 5m0s for pod "downward-api-c63e1312-8861-11e9-b613-8a9bc7c14a19" in namespace "downward-api-3046" to be "success or failure"
+Jun  6 13:48:33.078: INFO: Pod "downward-api-c63e1312-8861-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.124763ms
+Jun  6 13:48:35.081: INFO: Pod "downward-api-c63e1312-8861-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007012724s
+Jun  6 13:48:37.084: INFO: Pod "downward-api-c63e1312-8861-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.010233608s
+STEP: Saw pod success
+Jun  6 13:48:37.084: INFO: Pod "downward-api-c63e1312-8861-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:48:37.087: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downward-api-c63e1312-8861-11e9-b613-8a9bc7c14a19 container dapi-container: 
+STEP: delete the pod
+Jun  6 13:48:37.107: INFO: Waiting for pod downward-api-c63e1312-8861-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:48:37.109: INFO: Pod downward-api-c63e1312-8861-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:48:37.109: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-3046" for this suite.
+Jun  6 13:48:43.119: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:48:43.347: INFO: namespace downward-api-3046 deletion completed in 6.235729279s
+
+• [SLOW TEST:10.405 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
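+
+The limits/requests-as-env-vars mechanism exercised above, as a minimal sketch; the variable names CPU_LIMIT and MEMORY_LIMIT are chosen here for illustration, and divisors are set explicitly so the printed values are unambiguous:
+
+```bash
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: dapi-env-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: dapi
+    image: busybox
+    command: ["sh", "-c", "echo $CPU_LIMIT $MEMORY_LIMIT"]
+    resources:
+      limits:
+        cpu: 500m
+        memory: 64Mi
+    env:
+    - name: CPU_LIMIT
+      valueFrom:
+        resourceFieldRef:
+          resource: limits.cpu
+          divisor: 1m
+    - name: MEMORY_LIMIT
+      valueFrom:
+        resourceFieldRef:
+          resource: limits.memory
+EOF
+kubectl logs dapi-env-demo   # "500 67108864" (millicores and bytes)
+```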
+[sig-node] ConfigMap 
+  should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:48:43.347: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-5170
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap configmap-5170/configmap-test-cc71e416-8861-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume configMaps
+Jun  6 13:48:43.481: INFO: Waiting up to 5m0s for pod "pod-configmaps-cc72454d-8861-11e9-b613-8a9bc7c14a19" in namespace "configmap-5170" to be "success or failure"
+Jun  6 13:48:43.486: INFO: Pod "pod-configmaps-cc72454d-8861-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.281076ms
+Jun  6 13:48:45.488: INFO: Pod "pod-configmaps-cc72454d-8861-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006829037s
+Jun  6 13:48:47.491: INFO: Pod "pod-configmaps-cc72454d-8861-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009246791s
+STEP: Saw pod success
+Jun  6 13:48:47.491: INFO: Pod "pod-configmaps-cc72454d-8861-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:48:47.492: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-configmaps-cc72454d-8861-11e9-b613-8a9bc7c14a19 container env-test: 
+STEP: delete the pod
+Jun  6 13:48:47.506: INFO: Waiting for pod pod-configmaps-cc72454d-8861-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:48:47.508: INFO: Pod pod-configmaps-cc72454d-8861-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-node] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:48:47.508: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-5170" for this suite.
+Jun  6 13:48:53.517: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:48:53.586: INFO: namespace configmap-5170 deletion completed in 6.075127051s
+
+• [SLOW TEST:10.238 seconds]
+[sig-node] ConfigMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:32
+  should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
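+
+The environment-variable consumption variant, sketched with envFrom (configmap name and key are placeholders):
+
+```bash
+kubectl create configmap env-demo-cm --from-literal=SPECIAL_LEVEL=very
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cm-env-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: env-test
+    image: busybox
+    command: ["sh", "-c", "printenv SPECIAL_LEVEL"]
+    envFrom:
+    - configMapRef:
+        name: env-demo-cm
+EOF
+kubectl logs cm-env-demo   # prints "very"
+```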
+[k8s.io] Probing container 
+  with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:48:53.586: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-probe-2875
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 13:49:15.747: INFO: Container started at 2019-06-06 13:48:55 +0000 UTC, pod became ready at 2019-06-06 13:49:14 +0000 UTC
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:49:15.747: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-2875" for this suite.
+Jun  6 13:49:37.756: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:49:37.823: INFO: namespace container-probe-2875 deletion completed in 22.073651976s
+
+• [SLOW TEST:44.237 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
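+
+A minimal readiness-probe sketch mirroring the ~20s gap between container start and Ready observed above (Ctrl-C stops the watch):
+
+```bash
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: readiness-demo
+spec:
+  containers:
+  - name: app
+    image: busybox
+    command: ["sh", "-c", "sleep 600"]
+    readinessProbe:
+      exec:
+        command: ["/bin/true"]
+      initialDelaySeconds: 20
+EOF
+# READY stays 0/1 for the initial delay, then flips to 1/1; RESTARTS stays 0:
+kubectl get pod readiness-demo -w
+```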
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  Should recreate evicted statefulset [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:49:37.823: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename statefulset
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in statefulset-625
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace statefulset-625
+[It] Should recreate evicted statefulset [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Looking for a node to schedule stateful set and pod
+STEP: Creating pod with conflicting port in namespace statefulset-625
+STEP: Creating statefulset with conflicting port in namespace statefulset-625
+STEP: Waiting until pod test-pod will start running in namespace statefulset-625
+STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace statefulset-625
+Jun  6 13:49:42.030: INFO: Observed stateful pod in namespace: statefulset-625, name: ss-0, uid: ef1b98b1-8861-11e9-bdc9-0231d0af67bc, status phase: Pending. Waiting for statefulset controller to delete.
+Jun  6 13:49:42.218: INFO: Observed stateful pod in namespace: statefulset-625, name: ss-0, uid: ef1b98b1-8861-11e9-bdc9-0231d0af67bc, status phase: Failed. Waiting for statefulset controller to delete.
+Jun  6 13:49:42.223: INFO: Observed stateful pod in namespace: statefulset-625, name: ss-0, uid: ef1b98b1-8861-11e9-bdc9-0231d0af67bc, status phase: Failed. Waiting for statefulset controller to delete.
+Jun  6 13:49:42.227: INFO: Observed delete event for stateful pod ss-0 in namespace statefulset-625
+STEP: Removing pod with conflicting port in namespace statefulset-625
+STEP: Waiting when stateful pod ss-0 will be recreated in namespace statefulset-625 and will be in running state
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+Jun  6 13:49:46.246: INFO: Deleting all statefulset in ns statefulset-625
+Jun  6 13:49:46.247: INFO: Scaling statefulset ss to 0
+Jun  6 13:49:56.257: INFO: Waiting for statefulset status.replicas updated to 0
+Jun  6 13:49:56.259: INFO: Deleting statefulset ss
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:49:56.268: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-625" for this suite.
+Jun  6 13:50:02.282: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:50:02.353: INFO: namespace statefulset-625 deletion completed in 6.078450491s
+
+• [SLOW TEST:24.529 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    Should recreate evicted statefulset [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
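+
+A simpler stand-in for the recreation behavior (the test above evicts ss-0 via a deliberate host-port conflict; a plain delete is substituted here, assuming a StatefulSet named ss with at least one replica already exists):
+
+```bash
+kubectl delete pod ss-0
+# The StatefulSet controller recreates the pod with the same ordinal identity:
+kubectl get pod ss-0 -w
+```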
+SSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:50:02.353: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-2992
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 13:50:02.483: INFO: Waiting up to 5m0s for pod "downwardapi-volume-fb88dd06-8861-11e9-b613-8a9bc7c14a19" in namespace "downward-api-2992" to be "success or failure"
+Jun  6 13:50:02.487: INFO: Pod "downwardapi-volume-fb88dd06-8861-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.815061ms
+Jun  6 13:50:04.490: INFO: Pod "downwardapi-volume-fb88dd06-8861-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006322426s
+STEP: Saw pod success
+Jun  6 13:50:04.490: INFO: Pod "downwardapi-volume-fb88dd06-8861-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:50:04.491: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-fb88dd06-8861-11e9-b613-8a9bc7c14a19 container client-container: 
+STEP: delete the pod
+Jun  6 13:50:04.505: INFO: Waiting for pod downwardapi-volume-fb88dd06-8861-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:50:04.507: INFO: Pod downwardapi-volume-fb88dd06-8861-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:50:04.507: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-2992" for this suite.
+Jun  6 13:50:10.516: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:50:10.582: INFO: namespace downward-api-2992 deletion completed in 6.07298973s
+
+• [SLOW TEST:8.229 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
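+
+The downward API volume pattern, sketched by hand; with no memory limit set on the container, the mounted file falls back to node allocatable, which is what this test asserts (names are placeholders):
+
+```bash
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: dapi-mem-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: client-container
+    image: busybox
+    command: ["cat", "/etc/podinfo/mem_limit"]
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    downwardAPI:
+      items:
+      - path: mem_limit
+        resourceFieldRef:
+          containerName: client-container
+          resource: limits.memory
+EOF
+# With no limit set, the reported value is node allocatable memory, in bytes:
+kubectl logs dapi-mem-demo
+```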
+SSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:50:10.582: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-1746
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 13:50:10.718: INFO: Waiting up to 5m0s for pod "downwardapi-volume-0071401f-8862-11e9-b613-8a9bc7c14a19" in namespace "downward-api-1746" to be "success or failure"
+Jun  6 13:50:10.723: INFO: Pod "downwardapi-volume-0071401f-8862-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.914273ms
+Jun  6 13:50:12.725: INFO: Pod "downwardapi-volume-0071401f-8862-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007487186s
+Jun  6 13:50:14.728: INFO: Pod "downwardapi-volume-0071401f-8862-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.010135068s
+STEP: Saw pod success
+Jun  6 13:50:14.728: INFO: Pod "downwardapi-volume-0071401f-8862-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:50:14.730: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-0071401f-8862-11e9-b613-8a9bc7c14a19 container client-container: 
+STEP: delete the pod
+Jun  6 13:50:14.743: INFO: Waiting for pod downwardapi-volume-0071401f-8862-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:50:14.746: INFO: Pod downwardapi-volume-0071401f-8862-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:50:14.746: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-1746" for this suite.
+Jun  6 13:50:20.755: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:50:20.816: INFO: namespace downward-api-1746 deletion completed in 6.068295883s
+
+• [SLOW TEST:10.234 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
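+
+The cpu-limit variant differs from the memory sketch above only in the container limit, the downwardAPI item, and an explicit divisor; a sketch:
+
+```bash
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: dapi-cpu-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: client-container
+    image: busybox
+    command: ["cat", "/etc/podinfo/cpu_limit"]
+    resources:
+      limits:
+        cpu: 500m
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    downwardAPI:
+      items:
+      - path: cpu_limit
+        resourceFieldRef:
+          containerName: client-container
+          resource: limits.cpu
+          divisor: 1m
+EOF
+kubectl logs dapi-cpu-demo   # prints "500" (millicores, per the 1m divisor)
+```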
+SSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:50:20.817: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-2263
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 13:50:21.001: INFO: Waiting up to 5m0s for pod "downwardapi-volume-069240fa-8862-11e9-b613-8a9bc7c14a19" in namespace "projected-2263" to be "success or failure"
+Jun  6 13:50:21.005: INFO: Pod "downwardapi-volume-069240fa-8862-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.977679ms
+Jun  6 13:50:23.007: INFO: Pod "downwardapi-volume-069240fa-8862-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006357797s
+Jun  6 13:50:25.020: INFO: Pod "downwardapi-volume-069240fa-8862-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019242531s
+STEP: Saw pod success
+Jun  6 13:50:25.020: INFO: Pod "downwardapi-volume-069240fa-8862-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:50:25.022: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-069240fa-8862-11e9-b613-8a9bc7c14a19 container client-container: 
+STEP: delete the pod
+Jun  6 13:50:25.048: INFO: Waiting for pod downwardapi-volume-069240fa-8862-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:50:25.051: INFO: Pod downwardapi-volume-069240fa-8862-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:50:25.051: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-2263" for this suite.
+Jun  6 13:50:31.060: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:50:31.213: INFO: namespace projected-2263 deletion completed in 6.160815181s
+
+• [SLOW TEST:10.397 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:50:31.214: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename statefulset
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in statefulset-5916
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace statefulset-5916
+[It] should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a new StatefulSet
+Jun  6 13:50:31.361: INFO: Found 0 stateful pods, waiting for 3
+Jun  6 13:50:41.421: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true
+Jun  6 13:50:41.421: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true
+Jun  6 13:50:41.421: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true
+Jun  6 13:50:41.429: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-5916 ss2-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  6 13:50:41.658: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  6 13:50:41.658: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  6 13:50:41.658: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+STEP: Updating StatefulSet template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine
+Jun  6 13:50:51.684: INFO: Updating stateful set ss2
+STEP: Creating a new revision
+STEP: Updating Pods in reverse ordinal order
+Jun  6 13:51:01.697: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-5916 ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  6 13:51:01.900: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun  6 13:51:01.900: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun  6 13:51:01.900: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun  6 13:51:21.927: INFO: Waiting for StatefulSet statefulset-5916/ss2 to complete update
+Jun  6 13:51:21.927: INFO: Waiting for Pod statefulset-5916/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666
+STEP: Rolling back to a previous revision
+Jun  6 13:51:31.931: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-5916 ss2-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  6 13:51:32.190: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  6 13:51:32.190: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  6 13:51:32.190: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun  6 13:51:42.214: INFO: Updating stateful set ss2
+STEP: Rolling back update in reverse ordinal order
+Jun  6 13:51:52.227: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 exec --namespace=statefulset-5916 ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  6 13:51:52.444: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun  6 13:51:52.444: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun  6 13:51:52.444: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun  6 13:52:12.623: INFO: Waiting for StatefulSet statefulset-5916/ss2 to complete update
+Jun  6 13:52:12.624: INFO: Waiting for Pod statefulset-5916/ss2-0 to have revision ss2-787997d666 update revision ss2-c79899b9
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+Jun  6 13:52:22.629: INFO: Deleting all statefulset in ns statefulset-5916
+Jun  6 13:52:22.630: INFO: Scaling statefulset ss2 to 0
+Jun  6 13:52:52.641: INFO: Waiting for statefulset status.replicas updated to 0
+Jun  6 13:52:52.643: INFO: Deleting statefulset ss2
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:52:52.651: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-5916" for this suite.
+Jun  6 13:52:58.665: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:52:58.780: INFO: namespace statefulset-5916 deletion completed in 6.124550629s
+
+• [SLOW TEST:147.566 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should perform rolling updates and roll backs of template modifications [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:52:58.781: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-7367
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 13:52:58.923: INFO: Waiting up to 5m0s for pod "downwardapi-volume-64b3cb56-8862-11e9-b613-8a9bc7c14a19" in namespace "projected-7367" to be "success or failure"
+Jun  6 13:52:58.937: INFO: Pod "downwardapi-volume-64b3cb56-8862-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 14.046649ms
+Jun  6 13:53:00.940: INFO: Pod "downwardapi-volume-64b3cb56-8862-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016713869s
+STEP: Saw pod success
+Jun  6 13:53:00.940: INFO: Pod "downwardapi-volume-64b3cb56-8862-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:53:00.942: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-64b3cb56-8862-11e9-b613-8a9bc7c14a19 container client-container: 
+STEP: delete the pod
+Jun  6 13:53:00.958: INFO: Waiting for pod downwardapi-volume-64b3cb56-8862-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:53:00.959: INFO: Pod downwardapi-volume-64b3cb56-8862-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:53:00.959: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-7367" for this suite.
+Jun  6 13:53:06.970: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:53:07.036: INFO: namespace projected-7367 deletion completed in 6.074193118s
+
+• [SLOW TEST:8.255 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:53:07.037: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-2379
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 13:53:07.169: INFO: Waiting up to 5m0s for pod "downwardapi-volume-699df560-8862-11e9-b613-8a9bc7c14a19" in namespace "projected-2379" to be "success or failure"
+Jun  6 13:53:07.172: INFO: Pod "downwardapi-volume-699df560-8862-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.04371ms
+Jun  6 13:53:09.175: INFO: Pod "downwardapi-volume-699df560-8862-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.005578687s
+STEP: Saw pod success
+Jun  6 13:53:09.175: INFO: Pod "downwardapi-volume-699df560-8862-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:53:09.177: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-699df560-8862-11e9-b613-8a9bc7c14a19 container client-container: 
+STEP: delete the pod
+Jun  6 13:53:09.192: INFO: Waiting for pod downwardapi-volume-699df560-8862-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:53:09.194: INFO: Pod downwardapi-volume-699df560-8862-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:53:09.194: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-2379" for this suite.
+Jun  6 13:53:15.203: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:53:15.279: INFO: namespace projected-2379 deletion completed in 6.082511627s
+
+• [SLOW TEST:8.242 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should rollback without unnecessary restarts [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:53:15.279: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename daemonsets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in daemonsets-7146
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should rollback without unnecessary restarts [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 13:53:15.424: INFO: Create a RollingUpdate DaemonSet
+Jun  6 13:53:15.428: INFO: Check that daemon pods launch on every node of the cluster
+Jun  6 13:53:15.431: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:53:15.437: INFO: Number of nodes with available pods: 0
+Jun  6 13:53:15.437: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod
+Jun  6 13:53:16.439: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:53:16.442: INFO: Number of nodes with available pods: 0
+Jun  6 13:53:16.442: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod
+Jun  6 13:53:17.440: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:53:17.442: INFO: Number of nodes with available pods: 2
+Jun  6 13:53:17.442: INFO: Number of running nodes: 2, number of available pods: 2
+Jun  6 13:53:17.442: INFO: Update the DaemonSet to trigger a rollout
+Jun  6 13:53:17.447: INFO: Updating DaemonSet daemon-set
+Jun  6 13:53:26.456: INFO: Roll back the DaemonSet before rollout is complete
+Jun  6 13:53:26.461: INFO: Updating DaemonSet daemon-set
+Jun  6 13:53:26.461: INFO: Make sure DaemonSet rollback is complete
+Jun  6 13:53:26.464: INFO: Wrong image for pod: daemon-set-26dfk. Expected: docker.io/library/nginx:1.14-alpine, got: foo:non-existent.
+Jun  6 13:53:26.464: INFO: Pod daemon-set-26dfk is not available
+Jun  6 13:53:26.466: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:53:27.469: INFO: Wrong image for pod: daemon-set-26dfk. Expected: docker.io/library/nginx:1.14-alpine, got: foo:non-existent.
+Jun  6 13:53:27.469: INFO: Pod daemon-set-26dfk is not available
+Jun  6 13:53:27.471: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:53:28.469: INFO: Wrong image for pod: daemon-set-26dfk. Expected: docker.io/library/nginx:1.14-alpine, got: foo:non-existent.
+Jun  6 13:53:28.469: INFO: Pod daemon-set-26dfk is not available
+Jun  6 13:53:28.471: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+Jun  6 13:53:29.470: INFO: Pod daemon-set-jq2l6 is not available
+Jun  6 13:53:29.473: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-7146, will wait for the garbage collector to delete the pods
+Jun  6 13:53:29.557: INFO: Deleting DaemonSet.extensions daemon-set took: 26.817232ms
+Jun  6 13:53:29.858: INFO: Terminating DaemonSet.extensions daemon-set pods took: 300.423316ms
+Jun  6 13:54:45.860: INFO: Number of nodes with available pods: 0
+Jun  6 13:54:45.860: INFO: Number of running nodes: 0, number of available pods: 0
+Jun  6 13:54:45.862: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-7146/daemonsets","resourceVersion":"19088"},"items":null}
+
+Jun  6 13:54:45.863: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-7146/pods","resourceVersion":"19088"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:54:45.870: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-7146" for this suite.
+Jun  6 13:54:51.878: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:54:51.951: INFO: namespace daemonsets-7146 deletion completed in 6.079762642s
+
+• [SLOW TEST:96.673 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should rollback without unnecessary restarts [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run rc 
+  should create an rc from an image  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:54:51.952: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-1162
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[BeforeEach] [k8s.io] Kubectl run rc
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1354
+[It] should create an rc from an image  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun  6 13:54:52.077: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=kubectl-1162'
+Jun  6 13:54:52.316: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+Jun  6 13:54:52.316: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n"
+STEP: verifying the rc e2e-test-nginx-rc was created
+STEP: verifying the pod controlled by rc e2e-test-nginx-rc was created
+STEP: confirm that you can get logs from an rc
+Jun  6 13:54:52.326: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [e2e-test-nginx-rc-q5kf2]
+Jun  6 13:54:52.326: INFO: Waiting up to 5m0s for pod "e2e-test-nginx-rc-q5kf2" in namespace "kubectl-1162" to be "running and ready"
+Jun  6 13:54:52.329: INFO: Pod "e2e-test-nginx-rc-q5kf2": Phase="Pending", Reason="", readiness=false. Elapsed: 3.201087ms
+Jun  6 13:54:54.362: INFO: Pod "e2e-test-nginx-rc-q5kf2": Phase="Running", Reason="", readiness=true. Elapsed: 2.036707538s
+Jun  6 13:54:54.362: INFO: Pod "e2e-test-nginx-rc-q5kf2" satisfied condition "running and ready"
+Jun  6 13:54:54.362: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [e2e-test-nginx-rc-q5kf2]
+Jun  6 13:54:54.363: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 logs rc/e2e-test-nginx-rc --namespace=kubectl-1162'
+Jun  6 13:54:54.486: INFO: stderr: ""
+Jun  6 13:54:54.486: INFO: stdout: ""
+[AfterEach] [k8s.io] Kubectl run rc
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1359
+Jun  6 13:54:54.486: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete rc e2e-test-nginx-rc --namespace=kubectl-1162'
+Jun  6 13:54:54.591: INFO: stderr: ""
+Jun  6 13:54:54.591: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:54:54.591: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-1162" for this suite.
+Jun  6 13:55:00.603: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:55:00.719: INFO: namespace kubectl-1162 deletion completed in 6.12420437s
+
+• [SLOW TEST:8.767 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl run rc
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should create an rc from an image  [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] DNS 
+  should provide DNS for the cluster  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] DNS
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:55:00.720: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename dns
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in dns-3946
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide DNS for the cluster  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-3946.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-3946.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done
+
+STEP: creating a pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Jun  6 13:55:02.966: INFO: DNS probes using dns-3946/dns-test-ad6cde09-8862-11e9-b613-8a9bc7c14a19 succeeded
+
+STEP: deleting the pod
+[AfterEach] [sig-network] DNS
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:55:02.976: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "dns-3946" for this suite.
+Jun  6 13:55:08.986: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:55:09.147: INFO: namespace dns-3946 deletion completed in 6.168174211s
+
+• [SLOW TEST:8.427 seconds]
+[sig-network] DNS
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should provide DNS for the cluster  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] KubeletManagedEtcHosts 
+  should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] KubeletManagedEtcHosts
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:55:09.147: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-kubelet-etc-hosts-8193
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Setting up the test
+STEP: Creating hostNetwork=false pod
+STEP: Creating hostNetwork=true pod
+STEP: Running the test
+STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false
+Jun  6 13:55:15.420: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8193 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:55:15.420: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:55:15.571: INFO: Exec stderr: ""
+Jun  6 13:55:15.572: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8193 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:55:15.572: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:55:15.753: INFO: Exec stderr: ""
+Jun  6 13:55:15.753: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8193 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:55:15.754: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:55:15.910: INFO: Exec stderr: ""
+Jun  6 13:55:15.910: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8193 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:55:15.910: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:55:16.091: INFO: Exec stderr: ""
+STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount
+Jun  6 13:55:16.091: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8193 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:55:16.091: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:55:16.300: INFO: Exec stderr: ""
+Jun  6 13:55:16.300: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8193 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:55:16.300: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:55:16.461: INFO: Exec stderr: ""
+STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true
+Jun  6 13:55:16.462: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8193 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:55:16.462: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:55:16.620: INFO: Exec stderr: ""
+Jun  6 13:55:16.621: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8193 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:55:16.621: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:55:16.782: INFO: Exec stderr: ""
+Jun  6 13:55:16.782: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8193 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:55:16.782: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:55:16.933: INFO: Exec stderr: ""
+Jun  6 13:55:16.933: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8193 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 13:55:16.933: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 13:55:17.104: INFO: Exec stderr: ""
+[AfterEach] [k8s.io] KubeletManagedEtcHosts
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:55:17.104: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-kubelet-etc-hosts-8193" for this suite.
+Jun  6 13:55:55.113: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:55:55.180: INFO: namespace e2e-kubelet-etc-hosts-8193 deletion completed in 38.07298835s
+
+• [SLOW TEST:46.033 seconds]
+[k8s.io] KubeletManagedEtcHosts
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:55:55.180: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-4612
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name projected-configmap-test-volume-cdddf0a9-8862-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume configMaps
+Jun  6 13:55:55.366: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-cdde5f04-8862-11e9-b613-8a9bc7c14a19" in namespace "projected-4612" to be "success or failure"
+Jun  6 13:55:55.368: INFO: Pod "pod-projected-configmaps-cdde5f04-8862-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.327015ms
+Jun  6 13:55:57.370: INFO: Pod "pod-projected-configmaps-cdde5f04-8862-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.004652125s
+STEP: Saw pod success
+Jun  6 13:55:57.370: INFO: Pod "pod-projected-configmaps-cdde5f04-8862-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:55:57.372: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-configmaps-cdde5f04-8862-11e9-b613-8a9bc7c14a19 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun  6 13:55:57.389: INFO: Waiting for pod pod-projected-configmaps-cdde5f04-8862-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:55:57.391: INFO: Pod pod-projected-configmaps-cdde5f04-8862-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:55:57.391: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-4612" for this suite.
+Jun  6 13:56:03.399: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:56:03.464: INFO: namespace projected-4612 deletion completed in 6.070934274s
+
+• [SLOW TEST:8.283 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-api-machinery] Aggregator 
+  Should be able to support the 1.10 Sample API Server using the current Aggregator [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:56:03.464: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename aggregator
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in aggregator-6431
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:69
+[It] Should be able to support the 1.10 Sample API Server using the current Aggregator [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Registering the sample API server.
+Jun  6 13:56:04.371: INFO: deployment "sample-apiserver-deployment" doesn't have the required revision set
+Jun  6 13:56:06.403: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-65db6755fc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun  6 13:56:08.405: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-65db6755fc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun  6 13:56:10.406: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695426164, loc:(*time.Location)(0x8a140e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-65db6755fc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun  6 13:56:13.442: INFO: Waited 1.020021114s for the sample-apiserver to be ready to handle requests.
+[AfterEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:60
+[AfterEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:56:14.146: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "aggregator-6431" for this suite.
+Jun  6 13:56:20.260: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:56:20.328: INFO: namespace aggregator-6431 deletion completed in 6.152339377s
+
+• [SLOW TEST:16.865 seconds]
+[sig-api-machinery] Aggregator
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  Should be able to support the 1.10 Sample API Server using the current Aggregator [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:56:20.329: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-4756
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0666 on tmpfs
+Jun  6 13:56:20.465: INFO: Waiting up to 5m0s for pod "pod-dcd3cc35-8862-11e9-b613-8a9bc7c14a19" in namespace "emptydir-4756" to be "success or failure"
+Jun  6 13:56:20.468: INFO: Pod "pod-dcd3cc35-8862-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.33994ms
+Jun  6 13:56:22.470: INFO: Pod "pod-dcd3cc35-8862-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.005849291s
+STEP: Saw pod success
+Jun  6 13:56:22.471: INFO: Pod "pod-dcd3cc35-8862-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:56:22.472: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-dcd3cc35-8862-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 13:56:22.486: INFO: Waiting for pod pod-dcd3cc35-8862-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:56:22.488: INFO: Pod pod-dcd3cc35-8862-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:56:22.488: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-4756" for this suite.
+Jun  6 13:56:28.498: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:56:28.652: INFO: namespace emptydir-4756 deletion completed in 6.161486951s
+
+• [SLOW TEST:8.323 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:56:28.654: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-8172
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 13:56:28.792: INFO: Waiting up to 5m0s for pod "downwardapi-volume-e1cb2342-8862-11e9-b613-8a9bc7c14a19" in namespace "downward-api-8172" to be "success or failure"
+Jun  6 13:56:28.799: INFO: Pod "downwardapi-volume-e1cb2342-8862-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 6.643677ms
+Jun  6 13:56:30.801: INFO: Pod "downwardapi-volume-e1cb2342-8862-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.0091657s
+STEP: Saw pod success
+Jun  6 13:56:30.801: INFO: Pod "downwardapi-volume-e1cb2342-8862-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:56:30.803: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-e1cb2342-8862-11e9-b613-8a9bc7c14a19 container client-container: 
+STEP: delete the pod
+Jun  6 13:56:30.818: INFO: Waiting for pod downwardapi-volume-e1cb2342-8862-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:56:30.819: INFO: Pod downwardapi-volume-e1cb2342-8862-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:56:30.820: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-8172" for this suite.
+Jun  6 13:56:36.833: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:56:36.900: INFO: namespace downward-api-8172 deletion completed in 6.078777131s
+
+• [SLOW TEST:8.246 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicationController 
+  should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:56:36.901: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename replication-controller
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in replication-controller-2105
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Given a Pod with a 'name' label pod-adoption is created
+STEP: When a replication controller with a matching selector is created
+STEP: Then the orphan pod is adopted
+[AfterEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:56:40.059: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replication-controller-2105" for this suite.
+Jun  6 13:57:02.068: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:57:02.347: INFO: namespace replication-controller-2105 deletion completed in 22.285917929s
+
+• [SLOW TEST:25.447 seconds]
+[sig-apps] ReplicationController
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Namespaces [Serial] 
+  should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:57:02.348: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename namespaces
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in namespaces-2572
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a test namespace
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in nsdeletetest-6140
+STEP: Waiting for a default service account to be provisioned in namespace
+STEP: Creating a service in the namespace
+STEP: Deleting the namespace
+STEP: Waiting for the namespace to be removed.
+STEP: Recreating the namespace
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in nsdeletetest-1442
+STEP: Verifying there is no service in the namespace
+[AfterEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:57:08.856: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "namespaces-2572" for this suite.
+Jun  6 13:57:14.867: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:57:14.952: INFO: namespace namespaces-2572 deletion completed in 6.093045482s
+STEP: Destroying namespace "nsdeletetest-6140" for this suite.
+Jun  6 13:57:14.954: INFO: Namespace nsdeletetest-6140 was already deleted
+STEP: Destroying namespace "nsdeletetest-1442" for this suite.
+Jun  6 13:57:20.960: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:57:21.035: INFO: namespace nsdeletetest-1442 deletion completed in 6.081118965s
+
+• [SLOW TEST:18.687 seconds]
+[sig-api-machinery] Namespaces [Serial]
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:57:21.035: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-8013
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name cm-test-opt-del-0104756c-8863-11e9-b613-8a9bc7c14a19
+STEP: Creating configMap with name cm-test-opt-upd-010475aa-8863-11e9-b613-8a9bc7c14a19
+STEP: Creating the pod
+STEP: Deleting configmap cm-test-opt-del-0104756c-8863-11e9-b613-8a9bc7c14a19
+STEP: Updating configmap cm-test-opt-upd-010475aa-8863-11e9-b613-8a9bc7c14a19
+STEP: Creating configMap with name cm-test-opt-create-010475c3-8863-11e9-b613-8a9bc7c14a19
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:57:25.242: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-8013" for this suite.
+Jun  6 13:57:47.251: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:57:47.358: INFO: namespace configmap-8013 deletion completed in 22.113679723s
+
+• [SLOW TEST:26.323 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:57:47.358: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-4327
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-test-volume-10b3290f-8863-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume configMaps
+Jun  6 13:57:47.491: INFO: Waiting up to 5m0s for pod "pod-configmaps-10b38dc6-8863-11e9-b613-8a9bc7c14a19" in namespace "configmap-4327" to be "success or failure"
+Jun  6 13:57:47.494: INFO: Pod "pod-configmaps-10b38dc6-8863-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.552693ms
+Jun  6 13:57:49.497: INFO: Pod "pod-configmaps-10b38dc6-8863-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.006092043s
+Jun  6 13:57:51.499: INFO: Pod "pod-configmaps-10b38dc6-8863-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.008562732s
+STEP: Saw pod success
+Jun  6 13:57:51.499: INFO: Pod "pod-configmaps-10b38dc6-8863-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:57:51.501: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-configmaps-10b38dc6-8863-11e9-b613-8a9bc7c14a19 container configmap-volume-test: 
+STEP: delete the pod
+Jun  6 13:57:51.515: INFO: Waiting for pod pod-configmaps-10b38dc6-8863-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:57:51.517: INFO: Pod pod-configmaps-10b38dc6-8863-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:57:51.517: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-4327" for this suite.
+Jun  6 13:57:57.529: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:57:57.650: INFO: namespace configmap-4327 deletion completed in 6.129379419s
+
+• [SLOW TEST:10.292 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:57:57.651: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-6870
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-test-upd-16de107f-8863-11e9-b613-8a9bc7c14a19
+STEP: Creating the pod
+STEP: Waiting for pod with text data
+STEP: Waiting for pod with binary data
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:58:01.863: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-6870" for this suite.
+Jun  6 13:58:23.874: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:58:23.947: INFO: namespace configmap-6870 deletion completed in 22.08061535s
+
+• [SLOW TEST:26.296 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:58:23.947: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-7315
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir volume type on node default medium
+Jun  6 13:58:24.077: INFO: Waiting up to 5m0s for pod "pod-26821b69-8863-11e9-b613-8a9bc7c14a19" in namespace "emptydir-7315" to be "success or failure"
+Jun  6 13:58:24.081: INFO: Pod "pod-26821b69-8863-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.827106ms
+Jun  6 13:58:26.083: INFO: Pod "pod-26821b69-8863-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.00615716s
+STEP: Saw pod success
+Jun  6 13:58:26.083: INFO: Pod "pod-26821b69-8863-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:58:26.085: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-26821b69-8863-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 13:58:26.097: INFO: Waiting for pod pod-26821b69-8863-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:58:26.099: INFO: Pod pod-26821b69-8863-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:58:26.099: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-7315" for this suite.
+Jun  6 13:58:32.108: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:58:32.178: INFO: namespace emptydir-7315 deletion completed in 6.077122328s
+
+• [SLOW TEST:8.231 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSS
+------------------------------
+[sig-network] DNS 
+  should provide DNS for services  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] DNS
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:58:32.178: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename dns
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in dns-5293
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide DNS for services  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a test headless service
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-5293.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-5293.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-5293.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-5293.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-5293.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-5293.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-5293.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-5293.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-5293.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-5293.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-5293.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 139.243.66.100.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/100.66.243.139_udp@PTR;check="$$(dig +tcp +noall +answer +search 139.243.66.100.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/100.66.243.139_tcp@PTR;sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-5293.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-5293.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-5293.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-5293.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-5293.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-5293.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-5293.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-5293.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-5293.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-5293.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-5293.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 139.243.66.100.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/100.66.243.139_udp@PTR;check="$$(dig +tcp +noall +answer +search 139.243.66.100.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/100.66.243.139_tcp@PTR;sleep 1; done
+
+STEP: creating a pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Jun  6 13:58:34.348: INFO: Unable to read wheezy_udp@dns-test-service.dns-5293.svc.cluster.local from pod dns-5293/dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19: the server could not find the requested resource (get pods dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19)
+Jun  6 13:58:34.351: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5293.svc.cluster.local from pod dns-5293/dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19: the server could not find the requested resource (get pods dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19)
+Jun  6 13:58:34.354: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local from pod dns-5293/dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19: the server could not find the requested resource (get pods dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19)
+Jun  6 13:58:34.357: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local from pod dns-5293/dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19: the server could not find the requested resource (get pods dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19)
+Jun  6 13:58:34.377: INFO: Unable to read jessie_udp@dns-test-service.dns-5293.svc.cluster.local from pod dns-5293/dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19: the server could not find the requested resource (get pods dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19)
+Jun  6 13:58:34.379: INFO: Unable to read jessie_tcp@dns-test-service.dns-5293.svc.cluster.local from pod dns-5293/dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19: the server could not find the requested resource (get pods dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19)
+Jun  6 13:58:34.382: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local from pod dns-5293/dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19: the server could not find the requested resource (get pods dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19)
+Jun  6 13:58:34.385: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local from pod dns-5293/dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19: the server could not find the requested resource (get pods dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19)
+Jun  6 13:58:34.402: INFO: Lookups using dns-5293/dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19 failed for: [wheezy_udp@dns-test-service.dns-5293.svc.cluster.local wheezy_tcp@dns-test-service.dns-5293.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local jessie_udp@dns-test-service.dns-5293.svc.cluster.local jessie_tcp@dns-test-service.dns-5293.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-5293.svc.cluster.local]
+
+Jun  6 13:58:39.459: INFO: DNS probes using dns-5293/dns-test-2b6c44de-8863-11e9-b613-8a9bc7c14a19 succeeded
+
+STEP: deleting the pod
+STEP: deleting the test service
+STEP: deleting the test headless service
+[AfterEach] [sig-network] DNS
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:58:39.496: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "dns-5293" for this suite.
+Jun  6 13:58:45.508: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:58:45.840: INFO: namespace dns-5293 deletion completed in 6.339624767s
+
+• [SLOW TEST:13.662 seconds]
+[sig-network] DNS
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should provide DNS for services  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+S
+------------------------------
+[k8s.io] Probing container 
+  should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:58:45.840: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-probe-3877
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod liveness-http in namespace container-probe-3877
+Jun  6 13:58:49.980: INFO: Started pod liveness-http in namespace container-probe-3877
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun  6 13:58:49.982: INFO: Initial restart count of pod liveness-http is 0
+Jun  6 13:59:08.008: INFO: Restart count of pod container-probe-3877/liveness-http is now 1 (18.025476306s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:59:08.017: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-3877" for this suite.
+Jun  6 13:59:14.043: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:59:14.113: INFO: namespace container-probe-3877 deletion completed in 6.092807969s
+
+• [SLOW TEST:28.273 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+S
+------------------------------
+[k8s.io] Docker Containers 
+  should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:59:14.113: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename containers
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in containers-4708
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test use defaults
+Jun  6 13:59:14.245: INFO: Waiting up to 5m0s for pod "client-containers-4469222a-8863-11e9-b613-8a9bc7c14a19" in namespace "containers-4708" to be "success or failure"
+Jun  6 13:59:14.250: INFO: Pod "client-containers-4469222a-8863-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.741547ms
+Jun  6 13:59:16.252: INFO: Pod "client-containers-4469222a-8863-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007372697s
+STEP: Saw pod success
+Jun  6 13:59:16.252: INFO: Pod "client-containers-4469222a-8863-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 13:59:16.254: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod client-containers-4469222a-8863-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 13:59:16.269: INFO: Waiting for pod client-containers-4469222a-8863-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 13:59:16.275: INFO: Pod client-containers-4469222a-8863-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 13:59:16.275: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "containers-4708" for this suite.
+Jun  6 13:59:22.284: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 13:59:22.352: INFO: namespace containers-4708 deletion completed in 6.074651592s
+
+• [SLOW TEST:8.239 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[k8s.io] Probing container 
+  with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 13:59:22.353: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-probe-8137
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:00:22.488: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-8137" for this suite.
+Jun  6 14:00:44.499: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:00:44.625: INFO: namespace container-probe-8137 deletion completed in 22.133822216s
+
+• [SLOW TEST:82.272 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:00:44.629: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-7483
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0644 on node default medium
+Jun  6 14:00:44.760: INFO: Waiting up to 5m0s for pod "pod-7a5c9185-8863-11e9-b613-8a9bc7c14a19" in namespace "emptydir-7483" to be "success or failure"
+Jun  6 14:00:44.764: INFO: Pod "pod-7a5c9185-8863-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.746053ms
+Jun  6 14:00:46.766: INFO: Pod "pod-7a5c9185-8863-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006510369s
+STEP: Saw pod success
+Jun  6 14:00:46.767: INFO: Pod "pod-7a5c9185-8863-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:00:46.768: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-7a5c9185-8863-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 14:00:46.783: INFO: Waiting for pod pod-7a5c9185-8863-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:00:46.786: INFO: Pod pod-7a5c9185-8863-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:00:46.786: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-7483" for this suite.
+Jun  6 14:00:52.795: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:00:52.863: INFO: namespace emptydir-7483 deletion completed in 6.074434975s
+
+• [SLOW TEST:8.234 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:00:52.863: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-3540
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward api env vars
+Jun  6 14:00:52.994: INFO: Waiting up to 5m0s for pod "downward-api-7f452f34-8863-11e9-b613-8a9bc7c14a19" in namespace "downward-api-3540" to be "success or failure"
+Jun  6 14:00:52.996: INFO: Pod "downward-api-7f452f34-8863-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.405245ms
+Jun  6 14:00:54.999: INFO: Pod "downward-api-7f452f34-8863-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.005300735s
+STEP: Saw pod success
+Jun  6 14:00:54.999: INFO: Pod "downward-api-7f452f34-8863-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:00:55.003: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downward-api-7f452f34-8863-11e9-b613-8a9bc7c14a19 container dapi-container: 
+STEP: delete the pod
+Jun  6 14:00:55.015: INFO: Waiting for pod downward-api-7f452f34-8863-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:00:55.020: INFO: Pod downward-api-7f452f34-8863-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:00:55.020: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-3540" for this suite.
+Jun  6 14:01:01.029: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:01:01.161: INFO: namespace downward-api-3540 deletion completed in 6.138656706s
+
+• [SLOW TEST:8.298 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:01:01.161: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pod-network-test-5774
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Performing setup for networking test in namespace pod-network-test-5774
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun  6 14:01:01.287: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun  6 14:01:25.339: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://100.96.2.178:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-5774 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 14:01:25.339: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 14:01:25.495: INFO: Found all expected endpoints: [netserver-0]
+Jun  6 14:01:25.498: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://100.96.1.48:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-5774 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 14:01:25.498: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 14:01:25.705: INFO: Found all expected endpoints: [netserver-1]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:01:25.705: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-5774" for this suite.
+Jun  6 14:01:47.726: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:01:47.793: INFO: namespace pod-network-test-5774 deletion completed in 22.08510163s
+
+• [SLOW TEST:46.633 seconds]
+[sig-network] Networking
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:01:47.794: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-7598
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name projected-configmap-test-volume-map-a002dc14-8863-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume configMaps
+Jun  6 14:01:47.928: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-a0033bc5-8863-11e9-b613-8a9bc7c14a19" in namespace "projected-7598" to be "success or failure"
+Jun  6 14:01:47.934: INFO: Pod "pod-projected-configmaps-a0033bc5-8863-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.631849ms
+Jun  6 14:01:49.937: INFO: Pod "pod-projected-configmaps-a0033bc5-8863-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009286514s
+STEP: Saw pod success
+Jun  6 14:01:49.937: INFO: Pod "pod-projected-configmaps-a0033bc5-8863-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:01:49.940: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-configmaps-a0033bc5-8863-11e9-b613-8a9bc7c14a19 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun  6 14:01:49.971: INFO: Waiting for pod pod-projected-configmaps-a0033bc5-8863-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:01:49.981: INFO: Pod pod-projected-configmaps-a0033bc5-8863-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:01:49.983: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-7598" for this suite.
+Jun  6 14:01:56.004: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:01:56.082: INFO: namespace projected-7598 deletion completed in 6.092151834s
+
+• [SLOW TEST:8.289 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:01:56.083: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in secrets-710
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name secret-test-map-a4f96d80-8863-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume secrets
+Jun  6 14:01:56.255: INFO: Waiting up to 5m0s for pod "pod-secrets-a4f9f382-8863-11e9-b613-8a9bc7c14a19" in namespace "secrets-710" to be "success or failure"
+Jun  6 14:01:56.260: INFO: Pod "pod-secrets-a4f9f382-8863-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.613299ms
+Jun  6 14:01:58.263: INFO: Pod "pod-secrets-a4f9f382-8863-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008333567s
+STEP: Saw pod success
+Jun  6 14:01:58.263: INFO: Pod "pod-secrets-a4f9f382-8863-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:01:58.267: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-secrets-a4f9f382-8863-11e9-b613-8a9bc7c14a19 container secret-volume-test: 
+STEP: delete the pod
+Jun  6 14:01:58.281: INFO: Waiting for pod pod-secrets-a4f9f382-8863-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:01:58.283: INFO: Pod pod-secrets-a4f9f382-8863-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:01:58.283: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-710" for this suite.
+Jun  6 14:02:04.293: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:02:04.547: INFO: namespace secrets-710 deletion completed in 6.261829287s
+
+• [SLOW TEST:8.464 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:02:04.547: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-2809
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 14:02:04.680: INFO: Waiting up to 5m0s for pod "downwardapi-volume-a9ff520b-8863-11e9-b613-8a9bc7c14a19" in namespace "downward-api-2809" to be "success or failure"
+Jun  6 14:02:04.684: INFO: Pod "downwardapi-volume-a9ff520b-8863-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.491326ms
+Jun  6 14:02:06.687: INFO: Pod "downwardapi-volume-a9ff520b-8863-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006508735s
+STEP: Saw pod success
+Jun  6 14:02:06.687: INFO: Pod "downwardapi-volume-a9ff520b-8863-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:02:06.688: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-a9ff520b-8863-11e9-b613-8a9bc7c14a19 container client-container: 
+STEP: delete the pod
+Jun  6 14:02:06.702: INFO: Waiting for pod downwardapi-volume-a9ff520b-8863-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:02:06.705: INFO: Pod downwardapi-volume-a9ff520b-8863-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:02:06.705: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-2809" for this suite.
+Jun  6 14:02:12.715: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:02:12.781: INFO: namespace downward-api-2809 deletion completed in 6.073627191s
+
+• [SLOW TEST:8.234 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:02:12.781: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-7849
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating the pod
+Jun  6 14:02:17.441: INFO: Successfully updated pod "annotationupdateaee7a256-8863-11e9-b613-8a9bc7c14a19"
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:02:19.458: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-7849" for this suite.
+Jun  6 14:02:41.468: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:02:41.534: INFO: namespace projected-7849 deletion completed in 22.073716815s
+
+• [SLOW TEST:28.753 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:02:41.535: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in gc-9728
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for all pods to be garbage collected
+STEP: Gathering metrics
+W0606 14:02:51.682152      14 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun  6 14:02:51.682: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:02:51.682: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-9728" for this suite.
+Jun  6 14:02:57.691: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:02:57.847: INFO: namespace gc-9728 deletion completed in 6.163112065s
+
+• [SLOW TEST:16.312 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+[k8s.io] Pods 
+  should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:02:57.847: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pods-8894
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 14:02:58.023: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:03:02.189: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-8894" for this suite.
+Jun  6 14:03:46.200: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:03:46.264: INFO: namespace pods-8894 deletion completed in 44.071477624s
+
+• [SLOW TEST:48.417 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
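+
+The test above dials the pod's exec subresource over a websocket and checks that command output comes back on the stream. `kubectl exec` exercises the same subresource (over SPDY rather than a websocket); a minimal sketch with hypothetical names, not from this run:
+
+```bash
+kubectl run ws-demo --generator=run-pod/v1 --image=busybox:1.29 --restart=Never -- sleep 3600
+kubectl wait --for=condition=Ready pod/ws-demo --timeout=60s
+kubectl exec ws-demo -- echo remote command execution works
+kubectl delete pod ws-demo
+```
+------------------------------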
+SSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:03:46.265: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-1385
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0777 on tmpfs
+Jun  6 14:03:46.397: INFO: Waiting up to 5m0s for pod "pod-e6a0125b-8863-11e9-b613-8a9bc7c14a19" in namespace "emptydir-1385" to be "success or failure"
+Jun  6 14:03:46.404: INFO: Pod "pod-e6a0125b-8863-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 6.12364ms
+Jun  6 14:03:48.408: INFO: Pod "pod-e6a0125b-8863-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009952811s
+STEP: Saw pod success
+Jun  6 14:03:48.408: INFO: Pod "pod-e6a0125b-8863-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:03:48.410: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-e6a0125b-8863-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 14:03:48.441: INFO: Waiting for pod pod-e6a0125b-8863-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:03:48.443: INFO: Pod pod-e6a0125b-8863-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:03:48.444: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-1385" for this suite.
+Jun  6 14:03:54.452: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:03:54.521: INFO: namespace emptydir-1385 deletion completed in 6.075733065s
+
+• [SLOW TEST:8.257 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
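+
+This case mounts a tmpfs-backed emptyDir and verifies that a non-root user sees 0777 permissions on it. A hand-run equivalent, sketched with hypothetical names (the 0644/default-medium variant later in this log differs only in the volume's `medium` and the expected mode):
+
+```bash
+kubectl create -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: emptydir-0777-demo
+spec:
+  securityContext:
+    runAsUser: 1001            # non-root, as in the test name
+  restartPolicy: Never
+  containers:
+  - name: test-container
+    image: busybox:1.29
+    command: ["sh", "-c", "ls -ld /mnt/test && touch /mnt/test/ok && echo writable"]
+    volumeMounts:
+    - name: scratch
+      mountPath: /mnt/test
+  volumes:
+  - name: scratch
+    emptyDir:
+      medium: Memory           # tmpfs; omit `medium` for node-disk backing
+EOF
+kubectl logs emptydir-0777-demo   # after completion: drwxrwxrwx on the mount, then "writable"
+```
+------------------------------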
+S
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl replace 
+  should update a single-container pod's image  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:03:54.522: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-9895
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[BeforeEach] [k8s.io] Kubectl replace
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1619
+[It] should update a single-container pod's image  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun  6 14:03:54.646: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 run e2e-test-nginx-pod --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --labels=run=e2e-test-nginx-pod --namespace=kubectl-9895'
+Jun  6 14:03:54.720: INFO: stderr: ""
+Jun  6 14:03:54.720: INFO: stdout: "pod/e2e-test-nginx-pod created\n"
+STEP: verifying the pod e2e-test-nginx-pod is running
+STEP: verifying the pod e2e-test-nginx-pod was created
+Jun  6 14:03:59.771: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 get pod e2e-test-nginx-pod --namespace=kubectl-9895 -o json'
+Jun  6 14:03:59.833: INFO: stderr: ""
+Jun  6 14:03:59.833: INFO: stdout: "{\n    \"apiVersion\": \"v1\",\n    \"kind\": \"Pod\",\n    \"metadata\": {\n        \"annotations\": {\n            \"cni.projectcalico.org/podIP\": \"100.96.2.187/32\",\n            \"kubernetes.io/psp\": \"e2e-test-privileged-psp\"\n        },\n        \"creationTimestamp\": \"2019-06-06T14:03:54Z\",\n        \"labels\": {\n            \"run\": \"e2e-test-nginx-pod\"\n        },\n        \"name\": \"e2e-test-nginx-pod\",\n        \"namespace\": \"kubectl-9895\",\n        \"resourceVersion\": \"21031\",\n        \"selfLink\": \"/api/v1/namespaces/kubectl-9895/pods/e2e-test-nginx-pod\",\n        \"uid\": \"eb955fc7-8863-11e9-bdc9-0231d0af67bc\"\n    },\n    \"spec\": {\n        \"containers\": [\n            {\n                \"image\": \"docker.io/library/nginx:1.14-alpine\",\n                \"imagePullPolicy\": \"IfNotPresent\",\n                \"name\": \"e2e-test-nginx-pod\",\n                \"resources\": {},\n                \"terminationMessagePath\": \"/dev/termination-log\",\n                \"terminationMessagePolicy\": \"File\",\n                \"volumeMounts\": [\n                    {\n                        \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n                        \"name\": \"default-token-hjkfk\",\n                        \"readOnly\": true\n                    }\n                ]\n            }\n        ],\n        \"dnsPolicy\": \"ClusterFirst\",\n        \"enableServiceLinks\": true,\n        \"nodeName\": \"ip-172-16-66-200.ec2.internal\",\n        \"priority\": 0,\n        \"restartPolicy\": \"Always\",\n        \"schedulerName\": \"default-scheduler\",\n        \"securityContext\": {},\n        \"serviceAccount\": \"default\",\n        \"serviceAccountName\": \"default\",\n        \"terminationGracePeriodSeconds\": 30,\n        \"tolerations\": [\n            {\n                \"effect\": \"NoExecute\",\n                \"key\": \"node.kubernetes.io/not-ready\",\n                \"operator\": \"Exists\",\n                \"tolerationSeconds\": 300\n            },\n            {\n                \"effect\": \"NoExecute\",\n                \"key\": \"node.kubernetes.io/unreachable\",\n                \"operator\": \"Exists\",\n                \"tolerationSeconds\": 300\n            }\n        ],\n        \"volumes\": [\n            {\n                \"name\": \"default-token-hjkfk\",\n                \"secret\": {\n                    \"defaultMode\": 420,\n                    \"secretName\": \"default-token-hjkfk\"\n                }\n            }\n        ]\n    },\n    \"status\": {\n        \"conditions\": [\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-06T14:03:54Z\",\n                \"status\": \"True\",\n                \"type\": \"Initialized\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-06T14:03:56Z\",\n                \"status\": \"True\",\n                \"type\": \"Ready\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-06T14:03:56Z\",\n                \"status\": \"True\",\n                \"type\": \"ContainersReady\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-06T14:03:54Z\",\n                \"status\": \"True\",\n                \"type\": \"PodScheduled\"\n            }\n        ],\n        \"containerStatuses\": [\n            {\n                \"containerID\": \"docker://a8ec6a670ced772976fa3f1a3ea3f897ffff0e40fe9211b6029fa6e3cb24079b\",\n                \"image\": \"nginx:1.14-alpine\",\n                \"imageID\": \"docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7\",\n                \"lastState\": {},\n                \"name\": \"e2e-test-nginx-pod\",\n                \"ready\": true,\n                \"restartCount\": 0,\n                \"state\": {\n                    \"running\": {\n                        \"startedAt\": \"2019-06-06T14:03:55Z\"\n                    }\n                }\n            }\n        ],\n        \"hostIP\": \"172.16.66.200\",\n        \"phase\": \"Running\",\n        \"podIP\": \"100.96.2.187\",\n        \"qosClass\": \"BestEffort\",\n        \"startTime\": \"2019-06-06T14:03:54Z\"\n    }\n}\n"
+STEP: replace the image in the pod
+Jun  6 14:03:59.834: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 replace -f - --namespace=kubectl-9895'
+Jun  6 14:04:00.034: INFO: stderr: ""
+Jun  6 14:04:00.034: INFO: stdout: "pod/e2e-test-nginx-pod replaced\n"
+STEP: verifying the pod e2e-test-nginx-pod has the right image docker.io/library/busybox:1.29
+[AfterEach] [k8s.io] Kubectl replace
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1624
+Jun  6 14:04:00.121: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 delete pods e2e-test-nginx-pod --namespace=kubectl-9895'
+Jun  6 14:04:02.582: INFO: stderr: ""
+Jun  6 14:04:02.582: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:04:02.582: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-9895" for this suite.
+Jun  6 14:04:08.593: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:04:08.662: INFO: namespace kubectl-9895 deletion completed in 6.077438042s
+
+• [SLOW TEST:14.140 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl replace
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should update a single-container pod's image  [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
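+
+The replace flow above is: dump the live pod as a manifest, swap the container image, and `kubectl replace` the whole object (the image is one of the few mutable pod-spec fields). A sketch using the same images as the run, with hypothetical pod and file names:
+
+```bash
+kubectl run replace-demo --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine
+kubectl get pod replace-demo -o yaml > pod.yaml
+sed -i 's|nginx:1.14-alpine|busybox:1.29|' pod.yaml   # GNU sed; use `sed -i ''` on BSD/macOS
+kubectl replace -f pod.yaml
+kubectl get pod replace-demo -o jsonpath='{.spec.containers[0].image}'
+```
+------------------------------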
+SSS
+------------------------------
+[k8s.io] Docker Containers 
+  should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:04:08.662: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename containers
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in containers-9430
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test override arguments
+Jun  6 14:04:08.794: INFO: Waiting up to 5m0s for pod "client-containers-f3f9914c-8863-11e9-b613-8a9bc7c14a19" in namespace "containers-9430" to be "success or failure"
+Jun  6 14:04:08.799: INFO: Pod "client-containers-f3f9914c-8863-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.038983ms
+Jun  6 14:04:10.801: INFO: Pod "client-containers-f3f9914c-8863-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007588604s
+STEP: Saw pod success
+Jun  6 14:04:10.801: INFO: Pod "client-containers-f3f9914c-8863-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:04:10.803: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod client-containers-f3f9914c-8863-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 14:04:10.817: INFO: Waiting for pod client-containers-f3f9914c-8863-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:04:10.819: INFO: Pod client-containers-f3f9914c-8863-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:04:10.819: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "containers-9430" for this suite.
+Jun  6 14:04:16.832: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:04:16.953: INFO: namespace containers-9430 deletion completed in 6.131449442s
+
+• [SLOW TEST:8.291 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
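+
+"Override the image's default arguments (docker cmd)" maps to the pod spec's `command` (image ENTRYPOINT) and `args` (image CMD) fields. A minimal sketch with hypothetical names, not from this run:
+
+```bash
+kubectl create -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: args-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test-container
+    image: busybox:1.29
+    command: ["echo"]                  # replaces the image ENTRYPOINT
+    args: ["overridden", "arguments"]  # replaces the image CMD
+EOF
+kubectl logs args-demo   # after the pod completes, prints: overridden arguments
+```
+------------------------------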
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Watchers 
+  should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:04:16.954: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename watch
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in watch-9293
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating a watch on configmaps
+STEP: creating a new configmap
+STEP: modifying the configmap once
+STEP: closing the watch once it receives two notifications
+Jun  6 14:04:17.087: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-9293,SelfLink:/api/v1/namespaces/watch-9293/configmaps/e2e-watch-test-watch-closed,UID:f8eb4179-8863-11e9-bdc9-0231d0af67bc,ResourceVersion:21115,Generation:0,CreationTimestamp:2019-06-06 14:04:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun  6 14:04:17.087: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-9293,SelfLink:/api/v1/namespaces/watch-9293/configmaps/e2e-watch-test-watch-closed,UID:f8eb4179-8863-11e9-bdc9-0231d0af67bc,ResourceVersion:21116,Generation:0,CreationTimestamp:2019-06-06 14:04:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying the configmap a second time, while the watch is closed
+STEP: creating a new watch on configmaps from the last resource version observed by the first watch
+STEP: deleting the configmap
+STEP: Expecting to observe notifications for all changes to the configmap since the first watch closed
+Jun  6 14:04:17.095: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-9293,SelfLink:/api/v1/namespaces/watch-9293/configmaps/e2e-watch-test-watch-closed,UID:f8eb4179-8863-11e9-bdc9-0231d0af67bc,ResourceVersion:21117,Generation:0,CreationTimestamp:2019-06-06 14:04:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun  6 14:04:17.096: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-9293,SelfLink:/api/v1/namespaces/watch-9293/configmaps/e2e-watch-test-watch-closed,UID:f8eb4179-8863-11e9-bdc9-0231d0af67bc,ResourceVersion:21118,Generation:0,CreationTimestamp:2019-06-06 14:04:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:04:17.096: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-9293" for this suite.
+Jun  6 14:04:23.105: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:04:23.169: INFO: namespace watch-9293 deletion completed in 6.07092881s
+
+• [SLOW TEST:6.216 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
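+
+The watch here is closed after two notifications, then re-opened from the last observed resourceVersion so the missed events are replayed. The same semantics are visible through the raw watch API; a sketch with hypothetical names, assuming `kubectl get --raw` streams the response and the default namespace:
+
+```bash
+kubectl create configmap watch-demo --from-literal=mutation=0
+RV=$(kubectl get configmap watch-demo -o jsonpath='{.metadata.resourceVersion}')
+kubectl label configmap watch-demo mutated=once   # happens "while the watch is closed"
+# Re-open the watch from the recorded resourceVersion; the MODIFIED event above is replayed:
+kubectl get --raw "/api/v1/namespaces/default/configmaps?watch=true&fieldSelector=metadata.name%3Dwatch-demo&resourceVersion=${RV}"
+```
+------------------------------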
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicationController 
+  should release no longer matching pods [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:04:23.170: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename replication-controller
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in replication-controller-4551
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should release no longer matching pods [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Given a ReplicationController is created
+STEP: When the matched label of one of its pods change
+Jun  6 14:04:23.302: INFO: Pod name pod-release: Found 0 pods out of 1
+Jun  6 14:04:28.304: INFO: Pod name pod-release: Found 1 pods out of 1
+STEP: Then the pod is released
+[AfterEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:04:29.315: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replication-controller-4551" for this suite.
+Jun  6 14:04:35.325: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:04:35.448: INFO: namespace replication-controller-4551 deletion completed in 6.130237979s
+
+• [SLOW TEST:12.278 seconds]
+[sig-apps] ReplicationController
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should release no longer matching pods [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
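+
+"Releasing" here means the pod's labels change so the controller's selector no longer matches: the RC drops ownership (its ownerReferences entry on the pod is cleared) and creates a replacement, while the relabeled pod keeps running unmanaged. A sketch with hypothetical names:
+
+```bash
+kubectl create -f - <<'EOF'
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: pod-release-demo
+spec:
+  replicas: 1
+  selector:
+    name: pod-release
+  template:
+    metadata:
+      labels:
+        name: pod-release
+    spec:
+      containers:
+      - name: pause
+        image: k8s.gcr.io/pause:3.1
+EOF
+POD=$(kubectl get pods -l name=pod-release -o jsonpath='{.items[0].metadata.name}')
+kubectl label pod "$POD" name=released --overwrite   # no longer matches the selector
+kubectl get pods   # the released pod stays Running; the RC creates a replacement
+```
+------------------------------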
+SSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:04:35.449: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-4502
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0644 on node default medium
+Jun  6 14:04:35.582: INFO: Waiting up to 5m0s for pod "pod-03f164fe-8864-11e9-b613-8a9bc7c14a19" in namespace "emptydir-4502" to be "success or failure"
+Jun  6 14:04:35.587: INFO: Pod "pod-03f164fe-8864-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.442777ms
+Jun  6 14:04:37.620: INFO: Pod "pod-03f164fe-8864-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.037903398s
+STEP: Saw pod success
+Jun  6 14:04:37.620: INFO: Pod "pod-03f164fe-8864-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:04:37.622: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-03f164fe-8864-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 14:04:37.638: INFO: Waiting for pod pod-03f164fe-8864-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:04:37.639: INFO: Pod pod-03f164fe-8864-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:04:37.639: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-4502" for this suite.
+Jun  6 14:04:43.649: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:04:43.713: INFO: namespace emptydir-4502 deletion completed in 6.071423653s
+
+• [SLOW TEST:8.264 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:04:43.713: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-6639
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name projected-secret-test-08e5ad16-8864-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume secrets
+Jun  6 14:04:43.897: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-08e618e2-8864-11e9-b613-8a9bc7c14a19" in namespace "projected-6639" to be "success or failure"
+Jun  6 14:04:43.900: INFO: Pod "pod-projected-secrets-08e618e2-8864-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.706059ms
+Jun  6 14:04:45.903: INFO: Pod "pod-projected-secrets-08e618e2-8864-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006644278s
+STEP: Saw pod success
+Jun  6 14:04:45.903: INFO: Pod "pod-projected-secrets-08e618e2-8864-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:04:45.905: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-secrets-08e618e2-8864-11e9-b613-8a9bc7c14a19 container secret-volume-test: 
+STEP: delete the pod
+Jun  6 14:04:45.920: INFO: Waiting for pod pod-projected-secrets-08e618e2-8864-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:04:45.921: INFO: Pod pod-projected-secrets-08e618e2-8864-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:04:45.921: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-6639" for this suite.
+Jun  6 14:04:51.932: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:04:52.087: INFO: namespace projected-6639 deletion completed in 6.163426187s
+
+• [SLOW TEST:8.374 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
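+
+A projected volume bundles one or more sources (secrets, configmaps, downward API) into a single mount; this case checks that the same secret is consumable from two separate volumes in one pod. A sketch with hypothetical names, not from this run:
+
+```bash
+kubectl create secret generic projected-demo-secret --from-literal=data-1=value-1
+kubectl create -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: projected-secret-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: secret-volume-test
+    image: busybox:1.29
+    command: ["sh", "-c", "cat /etc/projected-one/data-1 /etc/projected-two/data-1"]
+    volumeMounts:
+    - name: one
+      mountPath: /etc/projected-one
+    - name: two
+      mountPath: /etc/projected-two
+  volumes:
+  - name: one
+    projected:
+      sources:
+      - secret:
+          name: projected-demo-secret
+  - name: two
+    projected:
+      sources:
+      - secret:
+          name: projected-demo-secret
+EOF
+kubectl logs projected-secret-demo   # after completion: value-1 printed twice
+```
+------------------------------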
+SSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl cluster-info 
+  should check if Kubernetes master services is included in cluster-info  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:04:52.088: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-9955
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should check if Kubernetes master services is included in cluster-info  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: validating cluster-info
+Jun  6 14:04:52.214: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 cluster-info'
+Jun  6 14:04:52.449: INFO: stderr: ""
+Jun  6 14:04:52.449: INFO: stdout: "\x1b[0;32mKubernetes master\x1b[0m is running at \x1b[0;33mhttps://100.64.0.1:443\x1b[0m\n\x1b[0;32mHeapster\x1b[0m is running at \x1b[0;33mhttps://100.64.0.1:443/api/v1/namespaces/kube-system/services/heapster/proxy\x1b[0m\n\x1b[0;32mCoreDNS\x1b[0m is running at \x1b[0;33mhttps://100.64.0.1:443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\x1b[0m\n\x1b[0;32mkubernetes-dashboard\x1b[0m is running at \x1b[0;33mhttps://100.64.0.1:443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy\x1b[0m\n\x1b[0;32mMetrics-server\x1b[0m is running at \x1b[0;33mhttps://100.64.0.1:443/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy\x1b[0m\n\x1b[0;32mtiller-deploy\x1b[0m is running at \x1b[0;33mhttps://100.64.0.1:443/api/v1/namespaces/kube-system/services/tiller-deploy:tiller/proxy\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:04:52.449: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-9955" for this suite.
+Jun  6 14:04:58.460: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:04:58.527: INFO: namespace kubectl-9955 deletion completed in 6.075876634s
+
+• [SLOW TEST:6.440 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl cluster-info
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should check if Kubernetes master services is included in cluster-info  [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
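+
+The escape sequences in the captured stdout (`\x1b[0;32m` and friends) are ANSI color codes from kubectl's terminal output; the test only asserts that the master endpoint appears. To run the same check by hand:
+
+```bash
+kubectl cluster-info                                          # master plus kube-system service endpoints
+kubectl cluster-info dump --output-directory=./cluster-state  # fuller diagnostics, written to files
+```
+------------------------------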
+SSSSSSSSS
+------------------------------
+[sig-api-machinery] Watchers 
+  should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:04:58.527: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename watch
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in watch-9572
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating a watch on configmaps with a certain label
+STEP: creating a new configmap
+STEP: modifying the configmap once
+STEP: changing the label value of the configmap
+STEP: Expecting to observe a delete notification for the watched object
+Jun  6 14:04:58.664: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-9572,SelfLink:/api/v1/namespaces/watch-9572/configmaps/e2e-watch-test-label-changed,UID:11b2d664-8864-11e9-bdc9-0231d0af67bc,ResourceVersion:21314,Generation:0,CreationTimestamp:2019-06-06 14:04:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun  6 14:04:58.664: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-9572,SelfLink:/api/v1/namespaces/watch-9572/configmaps/e2e-watch-test-label-changed,UID:11b2d664-8864-11e9-bdc9-0231d0af67bc,ResourceVersion:21315,Generation:0,CreationTimestamp:2019-06-06 14:04:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+Jun  6 14:04:58.664: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-9572,SelfLink:/api/v1/namespaces/watch-9572/configmaps/e2e-watch-test-label-changed,UID:11b2d664-8864-11e9-bdc9-0231d0af67bc,ResourceVersion:21316,Generation:0,CreationTimestamp:2019-06-06 14:04:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying the configmap a second time
+STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements
+STEP: changing the label value of the configmap back
+STEP: modifying the configmap a third time
+STEP: deleting the configmap
+STEP: Expecting to observe an add notification for the watched object when the label value was restored
+Jun  6 14:05:08.680: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-9572,SelfLink:/api/v1/namespaces/watch-9572/configmaps/e2e-watch-test-label-changed,UID:11b2d664-8864-11e9-bdc9-0231d0af67bc,ResourceVersion:21333,Generation:0,CreationTimestamp:2019-06-06 14:04:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun  6 14:05:08.681: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-9572,SelfLink:/api/v1/namespaces/watch-9572/configmaps/e2e-watch-test-label-changed,UID:11b2d664-8864-11e9-bdc9-0231d0af67bc,ResourceVersion:21334,Generation:0,CreationTimestamp:2019-06-06 14:04:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},}
+Jun  6 14:05:08.681: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-9572,SelfLink:/api/v1/namespaces/watch-9572/configmaps/e2e-watch-test-label-changed,UID:11b2d664-8864-11e9-bdc9-0231d0af67bc,ResourceVersion:21335,Generation:0,CreationTimestamp:2019-06-06 14:04:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:05:08.681: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-9572" for this suite.
+Jun  6 14:05:14.690: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:05:14.760: INFO: namespace watch-9572 deletion completed in 6.077522582s
+
+• [SLOW TEST:16.233 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
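+
+This watch is label-scoped, so relabeling the object out of the selector surfaces as a DELETED event and restoring the label as ADDED, even though the object itself was only modified. A raw-API sketch with hypothetical names (run the watch in a second terminal):
+
+```bash
+kubectl create configmap label-watch-demo --from-literal=mutation=0
+# Terminal 1: stream typed events for the label selector:
+kubectl get --raw "/api/v1/namespaces/default/configmaps?watch=true&labelSelector=watch-this%3Dyes"
+# Terminal 2: move the object out of, then back into, the selector:
+kubectl label configmap label-watch-demo watch-this=yes
+kubectl label configmap label-watch-demo watch-this=no --overwrite    # watcher sees DELETED
+kubectl label configmap label-watch-demo watch-this=yes --overwrite   # watcher sees ADDED
+```
+------------------------------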
+SSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:05:14.761: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pods-7125
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: updating the pod
+Jun  6 14:05:17.409: INFO: Successfully updated pod "pod-update-activedeadlineseconds-1b5fc17f-8864-11e9-b613-8a9bc7c14a19"
+Jun  6 14:05:17.409: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-1b5fc17f-8864-11e9-b613-8a9bc7c14a19" in namespace "pods-7125" to be "terminated due to deadline exceeded"
+Jun  6 14:05:17.413: INFO: Pod "pod-update-activedeadlineseconds-1b5fc17f-8864-11e9-b613-8a9bc7c14a19": Phase="Running", Reason="", readiness=true. Elapsed: 2.926481ms
+Jun  6 14:05:19.416: INFO: Pod "pod-update-activedeadlineseconds-1b5fc17f-8864-11e9-b613-8a9bc7c14a19": Phase="Running", Reason="", readiness=true. Elapsed: 2.006435965s
+Jun  6 14:05:21.420: INFO: Pod "pod-update-activedeadlineseconds-1b5fc17f-8864-11e9-b613-8a9bc7c14a19": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 4.01030337s
+Jun  6 14:05:21.420: INFO: Pod "pod-update-activedeadlineseconds-1b5fc17f-8864-11e9-b613-8a9bc7c14a19" satisfied condition "terminated due to deadline exceeded"
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:05:21.421: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-7125" for this suite.
+Jun  6 14:05:27.434: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:05:27.547: INFO: namespace pods-7125 deletion completed in 6.123364064s
+
+• [SLOW TEST:12.786 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
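+
+`activeDeadlineSeconds` is one of the few pod-spec fields that may be updated on a live pod (set if unset, or only decreased); once the pod has been active that long, the kubelet fails it with reason DeadlineExceeded, which is the Running-to-Failed transition logged above. A sketch with hypothetical names:
+
+```bash
+kubectl run deadline-demo --generator=run-pod/v1 --image=busybox:1.29 -- sleep 3600
+kubectl wait --for=condition=Ready pod/deadline-demo --timeout=60s
+kubectl patch pod deadline-demo -p '{"spec":{"activeDeadlineSeconds":5}}'
+kubectl get pod deadline-demo --watch   # Running -> Failed (DeadlineExceeded)
+```
+------------------------------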
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:05:27.547: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pods-2185
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 14:05:27.723: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:05:29.751: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-2185" for this suite.
+Jun  6 14:06:19.760: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:06:19.968: INFO: namespace pods-2185 deletion completed in 50.213936344s
+
+• [SLOW TEST:52.420 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
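+
+As with the exec case earlier, the test retrieves the pod's `log` subresource over a websocket; `kubectl logs` reads the same endpoint. A sketch with hypothetical names:
+
+```bash
+kubectl run logs-ws-demo --generator=run-pod/v1 --image=busybox:1.29 -- sh -c 'while true; do date; sleep 1; done'
+kubectl wait --for=condition=Ready pod/logs-ws-demo --timeout=60s
+kubectl logs -f logs-ws-demo                                                      # streamed via the client
+kubectl get --raw "/api/v1/namespaces/default/pods/logs-ws-demo/log?follow=true"  # the subresource itself
+```
+------------------------------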
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:06:19.968: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-4301
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 14:06:20.168: INFO: Waiting up to 5m0s for pod "downwardapi-volume-4247de43-8864-11e9-b613-8a9bc7c14a19" in namespace "projected-4301" to be "success or failure"
+Jun  6 14:06:20.172: INFO: Pod "downwardapi-volume-4247de43-8864-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.165815ms
+Jun  6 14:06:22.221: INFO: Pod "downwardapi-volume-4247de43-8864-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.053058796s
+STEP: Saw pod success
+Jun  6 14:06:22.221: INFO: Pod "downwardapi-volume-4247de43-8864-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:06:22.223: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-4247de43-8864-11e9-b613-8a9bc7c14a19 container client-container: 
+STEP: delete the pod
+Jun  6 14:06:22.322: INFO: Waiting for pod downwardapi-volume-4247de43-8864-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:06:22.324: INFO: Pod downwardapi-volume-4247de43-8864-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:06:22.324: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-4301" for this suite.
+Jun  6 14:06:28.334: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:06:28.470: INFO: namespace projected-4301 deletion completed in 6.142530451s
+
+• [SLOW TEST:8.502 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
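+
+`defaultMode` on a downwardAPI (or projected) volume sets the permission bits on every file the volume writes. A sketch that pins the mode to 0400 and reads it back (hypothetical names; `ls -lL` follows the volume's internal symlinks to the real files):
+
+```bash
+kubectl create -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downward-mode-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: client-container
+    image: busybox:1.29
+    command: ["sh", "-c", "ls -lL /etc/podinfo && cat /etc/podinfo/podname"]
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    downwardAPI:
+      defaultMode: 0400        # the knob this case exercises
+      items:
+      - path: podname
+        fieldRef:
+          fieldPath: metadata.name
+EOF
+kubectl logs downward-mode-demo   # after completion: file mode -r--------, then the pod name
+```
+------------------------------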
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command in a pod 
+  should print the output to logs [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:06:28.470: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubelet-test-9144
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should print the output to logs [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:06:32.623: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-9144" for this suite.
+Jun  6 14:07:16.633: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:07:16.750: INFO: namespace kubelet-test-9144 deletion completed in 44.124553659s
+
+• [SLOW TEST:48.280 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when scheduling a busybox command in a pod
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:40
+    should print the output to logs [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl describe 
+  should check if kubectl describe prints relevant information for rc and pods  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:07:16.750: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-6243
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should check if kubectl describe prints relevant information for rc and pods  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 14:07:16.874: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 version --client'
+Jun  6 14:07:16.920: INFO: stderr: ""
+Jun  6 14:07:16.920: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"14\", GitVersion:\"v1.14.2\", GitCommit:\"66049e3b21efe110454d67df4fa62b08ea79a19b\", GitTreeState:\"clean\", BuildDate:\"2019-05-16T16:23:09Z\", GoVersion:\"go1.12.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n"
+Jun  6 14:07:16.922: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-6243'
+Jun  6 14:07:17.080: INFO: stderr: ""
+Jun  6 14:07:17.080: INFO: stdout: "replicationcontroller/redis-master created\n"
+Jun  6 14:07:17.080: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-6243'
+Jun  6 14:07:17.245: INFO: stderr: ""
+Jun  6 14:07:17.245: INFO: stdout: "service/redis-master created\n"
+STEP: Waiting for Redis master to start.
+Jun  6 14:07:18.248: INFO: Selector matched 1 pods for map[app:redis]
+Jun  6 14:07:18.248: INFO: Found 0 / 1
+Jun  6 14:07:19.249: INFO: Selector matched 1 pods for map[app:redis]
+Jun  6 14:07:19.249: INFO: Found 1 / 1
+Jun  6 14:07:19.249: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+Jun  6 14:07:19.251: INFO: Selector matched 1 pods for map[app:redis]
+Jun  6 14:07:19.251: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+Jun  6 14:07:19.251: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 describe pod redis-master-cc8vw --namespace=kubectl-6243'
+Jun  6 14:07:19.323: INFO: stderr: ""
+Jun  6 14:07:19.323: INFO: stdout: "Name:               redis-master-cc8vw\nNamespace:          kubectl-6243\nPriority:           0\nPriorityClassName:  \nNode:               ip-172-16-66-200.ec2.internal/172.16.66.200\nStart Time:         Thu, 06 Jun 2019 14:07:17 +0000\nLabels:             app=redis\n                    role=master\nAnnotations:        cni.projectcalico.org/podIP: 100.96.2.197/32\n                    kubernetes.io/psp: e2e-test-privileged-psp\nStatus:             Running\nIP:                 100.96.2.197\nControlled By:      ReplicationController/redis-master\nContainers:\n  redis-master:\n    Container ID:   docker://fb05efda0973f55a679e009b718771e59f834e756e84e31d5a87bb1815344f8d\n    Image:          gcr.io/kubernetes-e2e-test-images/redis:1.0\n    Image ID:       docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830\n    Port:           6379/TCP\n    Host Port:      0/TCP\n    State:          Running\n      Started:      Thu, 06 Jun 2019 14:07:18 +0000\n    Ready:          True\n    Restart Count:  0\n    Environment:    \n    Mounts:\n      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xrcgz (ro)\nConditions:\n  Type              Status\n  Initialized       True \n  Ready             True \n  ContainersReady   True \n  PodScheduled      True \nVolumes:\n  default-token-xrcgz:\n    Type:        Secret (a volume populated by a Secret)\n    SecretName:  default-token-xrcgz\n    Optional:    false\nQoS Class:       BestEffort\nNode-Selectors:  \nTolerations:     node.kubernetes.io/not-ready:NoExecute for 300s\n                 node.kubernetes.io/unreachable:NoExecute for 300s\nEvents:\n  Type    Reason     Age   From                                    Message\n  ----    ------     ----  ----                                    -------\n  Normal  Scheduled  2s    default-scheduler                       Successfully assigned kubectl-6243/redis-master-cc8vw to ip-172-16-66-200.ec2.internal\n  Normal  Pulled     2s    kubelet, ip-172-16-66-200.ec2.internal  Container image \"gcr.io/kubernetes-e2e-test-images/redis:1.0\" already present on machine\n  Normal  Created    2s    kubelet, ip-172-16-66-200.ec2.internal  Created container redis-master\n  Normal  Started    1s    kubelet, ip-172-16-66-200.ec2.internal  Started container redis-master\n"
+Jun  6 14:07:19.323: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 describe rc redis-master --namespace=kubectl-6243'
+Jun  6 14:07:19.402: INFO: stderr: ""
+Jun  6 14:07:19.402: INFO: stdout: "Name:         redis-master\nNamespace:    kubectl-6243\nSelector:     app=redis,role=master\nLabels:       app=redis\n              role=master\nAnnotations:  <none>\nReplicas:     1 current / 1 desired\nPods Status:  1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n  Labels:  app=redis\n           role=master\n  Containers:\n   redis-master:\n    Image:        gcr.io/kubernetes-e2e-test-images/redis:1.0\n    Port:         6379/TCP\n    Host Port:    0/TCP\n    Environment:  <none>\n    Mounts:       <none>\n  Volumes:        <none>\nEvents:\n  Type    Reason            Age   From                    Message\n  ----    ------            ----  ----                    -------\n  Normal  SuccessfulCreate  2s    replication-controller  Created pod: redis-master-cc8vw\n"
+Jun  6 14:07:19.402: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 describe service redis-master --namespace=kubectl-6243'
+Jun  6 14:07:19.477: INFO: stderr: ""
+Jun  6 14:07:19.477: INFO: stdout: "Name:              redis-master\nNamespace:         kubectl-6243\nLabels:            app=redis\n                   role=master\nAnnotations:       <none>\nSelector:          app=redis,role=master\nType:              ClusterIP\nIP:                100.64.220.135\nPort:              <unset>  6379/TCP\nTargetPort:        redis-server/TCP\nEndpoints:         100.96.2.197:6379\nSession Affinity:  None\nEvents:            <none>\n"
+Jun  6 14:07:19.480: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 describe node ip-172-16-66-200.ec2.internal'
+Jun  6 14:07:19.578: INFO: stderr: ""
+Jun  6 14:07:19.578: INFO: stdout: "Name:               ip-172-16-66-200.ec2.internal\nRoles:              node\nLabels:             beta.kubernetes.io/arch=amd64\n                    beta.kubernetes.io/instance-type=t2.large\n                    beta.kubernetes.io/os=linux\n                    failure-domain.beta.kubernetes.io/region=us-east-1\n                    failure-domain.beta.kubernetes.io/zone=us-east-1d\n                    kubernetes.io/arch=amd64\n                    kubernetes.io/hostname=ip-172-16-66-200\n                    kubernetes.io/os=linux\n                    kubernetes.io/role=node\n                    kublr.io/node-group=default\n                    node-role.kubernetes.io/node=\nAnnotations:        flannel.alpha.coreos.com/backend-data: {\"VtepMAC\":\"c2:e0:9a:cf:93:80\"}\n                    flannel.alpha.coreos.com/backend-type: vxlan\n                    flannel.alpha.coreos.com/kube-subnet-manager: true\n                    flannel.alpha.coreos.com/public-ip: 172.16.66.200\n                    node.alpha.kubernetes.io/ttl: 0\n                    projectcalico.org/IPv4IPIPTunnelAddr: 100.96.2.1\n                    volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp:  Thu, 06 Jun 2019 12:01:49 +0000\nTaints:             <none>\nUnschedulable:      false\nConditions:\n  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message\n  ----             ------  -----------------                 ------------------                ------                       -------\n  MemoryPressure   False   Thu, 06 Jun 2019 14:07:05 +0000   Thu, 06 Jun 2019 12:01:49 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available\n  DiskPressure     False   Thu, 06 Jun 2019 14:07:05 +0000   Thu, 06 Jun 2019 12:01:49 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure\n  PIDPressure      False   Thu, 06 Jun 2019 14:07:05 +0000   Thu, 06 Jun 2019 12:01:49 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available\n  Ready            True    Thu, 06 Jun 2019 14:07:05 +0000   Thu, 06 Jun 2019 12:02:59 +0000   KubeletReady                 kubelet is posting ready status. AppArmor enabled\nAddresses:\n  InternalIP:   172.16.66.200\n  ExternalIP:   3.215.175.37\n  InternalDNS:  ip-172-16-66-200.ec2.internal\n  ExternalDNS:  ec2-3-215-175-37.compute-1.amazonaws.com\n  Hostname:     ip-172-16-66-200\nCapacity:\n attachable-volumes-aws-ebs:  39\n cpu:                         2\n ephemeral-storage:           15181020Ki\n hugepages-2Mi:               0\n memory:                      8173692Ki\n pods:                        110\nAllocatable:\n attachable-volumes-aws-ebs:  39\n cpu:                         2\n ephemeral-storage:           13990828009\n hugepages-2Mi:               0\n memory:                      6760572Ki\n pods:                        110\nSystem Info:\n Machine ID:                 4e46fbc9b8e840d688adffc4327ff1ba\n System UUID:                EC2F69D2-879A-9F79-12C6-69E15A4FBAA3\n Boot ID:                    dff68fc0-09c3-4f62-858b-7279e1ce309b\n Kernel Version:             4.4.0-1084-aws\n OS Image:                   Ubuntu 16.04.6 LTS\n Operating System:           linux\n Architecture:               amd64\n Container Runtime Version:  docker://18.9.5\n Kubelet Version:            v1.14.2\n Kube-Proxy Version:         v1.14.2\nPodCIDR:                     100.96.2.0/24\nProviderID:                  aws:///us-east-1d/i-0dc3176eed4eda3e1\nNon-terminated Pods:         (12 in total)\n  Namespace                  Name                                                                                                              CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE\n  ---------                  ----                                                                                                              ------------  ----------  ---------------  -------------  ---\n  heptio-sonobuoy            sonobuoy-systemd-logs-daemon-set-bc8f4f63e26f462d-d4vcf                                                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         72m\n  kube-system                canal-kgff5                                                                                                       40m (2%)      0 (0%)      112Mi (1%)       112Mi (1%)     124m\n  kube-system                heapster-v1.6.0-beta.1-6979f49998-zrlsp                                                                           138m (6%)     138m (6%)   324Mi (4%)       324Mi (4%)     123m\n  kube-system                k8s-api-haproxy-313ee916843387945fe68a625784d2a07122c117ee63e285821800170e69f652-ip-172-16-66-200.ec2.internal    1m (0%)       0 (0%)      20Mi (0%)        20Mi (0%)      124m\n  kube-system                kube-proxy-7a09f3d398339426fb2660a3d58c4b6a781901227d4954ccce4069e834b95d61-ip-172-16-66-200.ec2.internal         5m (0%)       250m (12%)  48Mi (0%)        48Mi (0%)      125m\n  kube-system                kublr-logging-fluentd-es-v2.0.2-pl5tm                                                                             150m (7%)     0 (0%)      512Mi (7%)       512Mi (7%)     123m\n  kube-system                kublr-logging-rabbitmq-0                                                                                          400m (20%)    0 (0%)      512Mi (7%)       1Gi (15%)      123m\n  kube-system                kublr-logging-rabbitmq-exporter-85b669fcb9-dv2t2                                                                  10m (0%)      100m (5%)   20Mi (0%)        128Mi (1%)     123m\n  kube-system                kublr-monitoring-kube-state-metrics-6fb9c7594b-zqb9d                                                              113m (5%)     203m (10%)  136Mi (2%)       136Mi (2%)     123m\n  kube-system                metrics-server-v0.3.1-7f597fc6fd-ljsdj                                                                            98m (4%)      148m (7%)   204Mi (3%)       404Mi (6%)     124m\n  kube-system                node-local-dns-vq5mj                                                                                              25m (1%)      0 (0%)      5Mi (0%)         30Mi (0%)      124m\n  kubectl-6243               redis-master-cc8vw                                                                                                0 (0%)        0 (0%)      0 (0%)           0 (0%)         2s\nAllocated resources:\n  (Total limits may be over 100 percent, i.e., overcommitted.)\n  Resource                    Requests      Limits\n  --------                    --------      ------\n  cpu                         980m (49%)    839m (41%)\n  memory                      1893Mi (28%)  2738Mi (41%)\n  ephemeral-storage           0 (0%)        0 (0%)\n  attachable-volumes-aws-ebs  0             0\nEvents:                       <none>\n"
+Jun  6 14:07:19.579: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 describe namespace kubectl-6243'
+Jun  6 14:07:19.645: INFO: stderr: ""
+Jun  6 14:07:19.645: INFO: stdout: "Name:         kubectl-6243\nLabels:       e2e-framework=kubectl\n              e2e-run=45bf6e69-885a-11e9-b613-8a9bc7c14a19\nAnnotations:  <none>\nStatus:       Active\n\nNo resource quota.\n\nNo resource limits.\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:07:19.645: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-6243" for this suite.
+Jun  6 14:07:41.655: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:07:41.748: INFO: namespace kubectl-6243 deletion completed in 22.100054099s
+
+• [SLOW TEST:24.998 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl describe
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should check if kubectl describe prints relevant information for rc and pods  [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:07:41.748: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in gc-1870
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the rc1
+STEP: create the rc2
+STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well
+STEP: delete the rc simpletest-rc-to-be-deleted
+STEP: wait for the rc to be deleted
+STEP: Gathering metrics
+W0606 14:07:52.062815      14 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun  6 14:07:52.062: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:07:52.062: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-1870" for this suite.
+Jun  6 14:07:58.072: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:07:58.152: INFO: namespace gc-1870 deletion completed in 6.086953201s
+
+• [SLOW TEST:16.404 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:07:58.152: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename daemonsets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in daemonsets-3380
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating simple DaemonSet "daemon-set"
+STEP: Check that daemon pods launch on every node of the cluster.
+Jun  6 14:07:58.346: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:07:58.349: INFO: Number of nodes with available pods: 0
+Jun  6 14:07:58.349: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod
+Jun  6 14:07:59.352: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:07:59.354: INFO: Number of nodes with available pods: 0
+Jun  6 14:07:59.354: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod
+Jun  6 14:08:00.352: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:00.355: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:00.355: INFO: Node ip-172-16-66-200.ec2.internal is running more than one daemon pod
+Jun  6 14:08:01.353: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:01.355: INFO: Number of nodes with available pods: 2
+Jun  6 14:08:01.355: INFO: Number of running nodes: 2, number of available pods: 2
+STEP: Stop a daemon pod, check that the daemon pod is revived.
+Jun  6 14:08:01.369: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:01.372: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:01.372: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:02.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:02.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:02.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:03.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:03.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:03.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:04.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:04.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:04.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:05.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:05.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:05.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:06.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:06.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:06.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:07.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:07.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:07.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:08.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:08.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:08.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:09.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:09.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:09.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:10.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:10.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:10.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:11.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:11.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:11.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:12.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:12.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:12.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:13.374: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:13.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:13.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:14.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:14.421: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:14.421: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:15.375: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:15.377: INFO: Number of nodes with available pods: 1
+Jun  6 14:08:15.377: INFO: Node ip-172-16-89-18.ec2.internal is running more than one daemon pod
+Jun  6 14:08:16.374: INFO: DaemonSet pods can't tolerate node ip-172-16-9-161.ec2.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
+Jun  6 14:08:16.376: INFO: Number of nodes with available pods: 2
+Jun  6 14:08:16.376: INFO: Number of running nodes: 2, number of available pods: 2
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-3380, will wait for the garbage collector to delete the pods
+Jun  6 14:08:16.435: INFO: Deleting DaemonSet.extensions daemon-set took: 5.084946ms
+Jun  6 14:08:16.835: INFO: Terminating DaemonSet.extensions daemon-set pods took: 400.165873ms
+Jun  6 14:08:25.837: INFO: Number of nodes with available pods: 0
+Jun  6 14:08:25.837: INFO: Number of running nodes: 0, number of available pods: 0
+Jun  6 14:08:25.839: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-3380/daemonsets","resourceVersion":"22091"},"items":null}
+
+Jun  6 14:08:25.842: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-3380/pods","resourceVersion":"22091"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:08:25.849: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-3380" for this suite.
+Jun  6 14:08:31.858: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:08:31.946: INFO: namespace daemonsets-3380 deletion completed in 6.095677136s
+
+• [SLOW TEST:33.794 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Proxy server 
+  should support --unix-socket=/path  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:08:31.947: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-5660
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should support --unix-socket=/path  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Starting the proxy
+Jun  6 14:08:32.070: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-041581163 proxy --unix-socket=/tmp/kubectl-proxy-unix374013410/test'
+STEP: retrieving proxy /api/ output
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:08:32.117: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-5660" for this suite.
+Jun  6 14:08:38.128: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:08:38.249: INFO: namespace kubectl-5660 deletion completed in 6.128069733s
+
+• [SLOW TEST:6.302 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Proxy server
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should support --unix-socket=/path  [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:08:38.249: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-2450
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0666 on node default medium
+Jun  6 14:08:38.382: INFO: Waiting up to 5m0s for pod "pod-94a96384-8864-11e9-b613-8a9bc7c14a19" in namespace "emptydir-2450" to be "success or failure"
+Jun  6 14:08:38.384: INFO: Pod "pod-94a96384-8864-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 1.876616ms
+Jun  6 14:08:40.387: INFO: Pod "pod-94a96384-8864-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.004700896s
+STEP: Saw pod success
+Jun  6 14:08:40.387: INFO: Pod "pod-94a96384-8864-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:08:40.389: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-94a96384-8864-11e9-b613-8a9bc7c14a19 container test-container: <nil>
+STEP: delete the pod
+Jun  6 14:08:40.405: INFO: Waiting for pod pod-94a96384-8864-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:08:40.407: INFO: Pod pod-94a96384-8864-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:08:40.407: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-2450" for this suite.
+Jun  6 14:08:46.432: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:08:46.648: INFO: namespace emptydir-2450 deletion completed in 6.238425418s
+
+• [SLOW TEST:8.399 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:08:46.648: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in gc-8372
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the deployment
+STEP: Wait for the Deployment to create new ReplicaSet
+STEP: delete the deployment
+STEP: wait for all rs to be garbage collected
+STEP: expected 0 rs, got 1 rs
+STEP: expected 0 pods, got 2 pods
+STEP: Gathering metrics
+W0606 14:08:47.804444      14 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun  6 14:08:47.804: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:08:47.804: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-8372" for this suite.
+Jun  6 14:08:53.815: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:08:53.883: INFO: namespace gc-8372 deletion completed in 6.077122979s
+
+• [SLOW TEST:7.235 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:08:53.884: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-8005
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 14:08:54.016: INFO: Waiting up to 5m0s for pod "downwardapi-volume-9dfafe47-8864-11e9-b613-8a9bc7c14a19" in namespace "downward-api-8005" to be "success or failure"
+Jun  6 14:08:54.029: INFO: Pod "downwardapi-volume-9dfafe47-8864-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 12.838207ms
+Jun  6 14:08:56.035: INFO: Pod "downwardapi-volume-9dfafe47-8864-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.018374593s
+STEP: Saw pod success
+Jun  6 14:08:56.035: INFO: Pod "downwardapi-volume-9dfafe47-8864-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:08:56.037: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-9dfafe47-8864-11e9-b613-8a9bc7c14a19 container client-container: <nil>
+STEP: delete the pod
+Jun  6 14:08:56.057: INFO: Waiting for pod downwardapi-volume-9dfafe47-8864-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:08:56.059: INFO: Pod downwardapi-volume-9dfafe47-8864-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:08:56.059: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-8005" for this suite.
+Jun  6 14:09:02.068: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:09:02.144: INFO: namespace downward-api-8005 deletion completed in 6.08325184s
+
+• [SLOW TEST:8.261 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:09:02.145: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in gc-6100
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for the rc to be deleted
+STEP: Gathering metrics
+W0606 14:09:08.287267      14 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun  6 14:09:08.287: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:09:08.287: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-6100" for this suite.
+Jun  6 14:09:14.297: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:09:14.362: INFO: namespace gc-6100 deletion completed in 6.073092038s
+
+• [SLOW TEST:12.217 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:09:14.362: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-7505
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating projection with secret that has name projected-secret-test-map-aa3cc2f0-8864-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume secrets
+Jun  6 14:09:14.583: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-aa3d47c3-8864-11e9-b613-8a9bc7c14a19" in namespace "projected-7505" to be "success or failure"
+Jun  6 14:09:14.597: INFO: Pod "pod-projected-secrets-aa3d47c3-8864-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.640997ms
+Jun  6 14:09:16.600: INFO: Pod "pod-projected-secrets-aa3d47c3-8864-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007389153s
+Jun  6 14:09:18.603: INFO: Pod "pod-projected-secrets-aa3d47c3-8864-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.00974471s
+STEP: Saw pod success
+Jun  6 14:09:18.603: INFO: Pod "pod-projected-secrets-aa3d47c3-8864-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:09:18.604: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-secrets-aa3d47c3-8864-11e9-b613-8a9bc7c14a19 container projected-secret-volume-test: <nil>
+STEP: delete the pod
+Jun  6 14:09:18.617: INFO: Waiting for pod pod-projected-secrets-aa3d47c3-8864-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:09:18.619: INFO: Pod pod-projected-secrets-aa3d47c3-8864-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:09:18.619: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-7505" for this suite.
+Jun  6 14:09:24.628: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:09:24.719: INFO: namespace projected-7505 deletion completed in 6.09789997s
+
+• [SLOW TEST:10.357 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[k8s.io] Probing container 
+  should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:09:24.719: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-probe-1588
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod liveness-exec in namespace container-probe-1588
+Jun  6 14:09:28.855: INFO: Started pod liveness-exec in namespace container-probe-1588
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun  6 14:09:28.856: INFO: Initial restart count of pod liveness-exec is 0
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:13:29.313: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-1588" for this suite.
+Jun  6 14:13:35.325: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:13:35.448: INFO: namespace container-probe-1588 deletion completed in 6.131986203s
+
+• [SLOW TEST:250.729 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod 
+  should be possible to delete [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:13:35.449: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubelet-test-1168
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[BeforeEach] when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81
+[It] should be possible to delete [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:13:35.636: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-1168" for this suite.
+Jun  6 14:13:57.652: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:13:57.735: INFO: namespace kubelet-test-1168 deletion completed in 22.094281195s
+
+• [SLOW TEST:22.286 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78
+    should be possible to delete [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:13:57.735: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-6882
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 14:13:58.065: INFO: Waiting up to 5m0s for pod "downwardapi-volume-53354757-8865-11e9-b613-8a9bc7c14a19" in namespace "downward-api-6882" to be "success or failure"
+Jun  6 14:13:58.069: INFO: Pod "downwardapi-volume-53354757-8865-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.229181ms
+Jun  6 14:14:00.072: INFO: Pod "downwardapi-volume-53354757-8865-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007027001s
+Jun  6 14:14:02.075: INFO: Pod "downwardapi-volume-53354757-8865-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.009987137s
+STEP: Saw pod success
+Jun  6 14:14:02.075: INFO: Pod "downwardapi-volume-53354757-8865-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:14:02.077: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-53354757-8865-11e9-b613-8a9bc7c14a19 container client-container: <nil>
+STEP: delete the pod
+Jun  6 14:14:02.093: INFO: Waiting for pod downwardapi-volume-53354757-8865-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:14:02.095: INFO: Pod downwardapi-volume-53354757-8865-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:14:02.095: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-6882" for this suite.
+Jun  6 14:14:08.105: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:14:08.248: INFO: namespace downward-api-6882 deletion completed in 6.151336229s
+
+• [SLOW TEST:10.513 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:14:08.249: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-5367
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating the pod
+Jun  6 14:14:12.962: INFO: Successfully updated pod "annotationupdate5963056a-8865-11e9-b613-8a9bc7c14a19"
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:14:14.982: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-5367" for this suite.
+Jun  6 14:14:36.992: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:14:37.060: INFO: namespace downward-api-5367 deletion completed in 22.075348079s
+
+• [SLOW TEST:28.811 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:14:37.060: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-1440
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating projection with secret that has name projected-secret-test-6a87b538-8865-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume secrets
+Jun  6 14:14:37.195: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-6a8822c1-8865-11e9-b613-8a9bc7c14a19" in namespace "projected-1440" to be "success or failure"
+Jun  6 14:14:37.199: INFO: Pod "pod-projected-secrets-6a8822c1-8865-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.363808ms
+Jun  6 14:14:39.202: INFO: Pod "pod-projected-secrets-6a8822c1-8865-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.007104529s
+Jun  6 14:14:41.205: INFO: Pod "pod-projected-secrets-6a8822c1-8865-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.010095215s
+STEP: Saw pod success
+Jun  6 14:14:41.205: INFO: Pod "pod-projected-secrets-6a8822c1-8865-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:14:41.207: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-secrets-6a8822c1-8865-11e9-b613-8a9bc7c14a19 container projected-secret-volume-test: 
+STEP: delete the pod
+Jun  6 14:14:41.220: INFO: Waiting for pod pod-projected-secrets-6a8822c1-8865-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:14:41.222: INFO: Pod pod-projected-secrets-6a8822c1-8865-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:14:41.222: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-1440" for this suite.
+Jun  6 14:14:47.231: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:14:47.326: INFO: namespace projected-1440 deletion completed in 6.101147174s
+
+• [SLOW TEST:10.266 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:14:47.326: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in gc-5443
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for the rc to be deleted
+STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods
+STEP: Gathering metrics
+W0606 14:15:27.471144      14 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun  6 14:15:27.471: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:15:27.471: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-5443" for this suite.
+Jun  6 14:15:33.525: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:15:33.648: INFO: namespace gc-5443 deletion completed in 6.175403649s
+
+• [SLOW TEST:46.322 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:15:33.649: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pods-2315
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating pod
+Jun  6 14:15:37.797: INFO: Pod pod-hostip-8c430165-8865-11e9-b613-8a9bc7c14a19 has hostIP: 172.16.66.200
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:15:37.797: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-2315" for this suite.
+Jun  6 14:15:59.809: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:15:59.945: INFO: namespace pods-2315 deletion completed in 22.145122307s
+
+• [SLOW TEST:26.296 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Proxy version v1 
+  should proxy through a service and a pod  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] version v1
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:15:59.945: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename proxy
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in proxy-5451
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should proxy through a service and a pod  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: starting an echo server on multiple ports
+STEP: creating replication controller proxy-service-d7hv4 in namespace proxy-5451
+I0606 14:16:00.082284      14 runners.go:184] Created replication controller with name: proxy-service-d7hv4, namespace: proxy-5451, replica count: 1
+I0606 14:16:01.132657      14 runners.go:184] proxy-service-d7hv4 Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0606 14:16:02.132856      14 runners.go:184] proxy-service-d7hv4 Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0606 14:16:03.133106      14 runners.go:184] proxy-service-d7hv4 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0606 14:16:04.133319      14 runners.go:184] proxy-service-d7hv4 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0606 14:16:05.133532      14 runners.go:184] proxy-service-d7hv4 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0606 14:16:06.133737      14 runners.go:184] proxy-service-d7hv4 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0606 14:16:07.133976      14 runners.go:184] proxy-service-d7hv4 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0606 14:16:08.134221      14 runners.go:184] proxy-service-d7hv4 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0606 14:16:09.134445      14 runners.go:184] proxy-service-d7hv4 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0606 14:16:10.134696      14 runners.go:184] proxy-service-d7hv4 Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+Jun  6 14:16:10.136: INFO: setup took 10.066238083s, starting test cases
+STEP: running 16 cases, 20 attempts per case, 320 total attempts
+Jun  6 14:16:10.159: INFO: (0) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 21.737827ms)
+Jun  6 14:16:10.160: INFO: (0) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 22.508685ms)
+Jun  6 14:16:10.168: INFO: (0) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 30.421044ms)
+Jun  6 14:16:10.172: INFO: (0) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 35.117072ms)
+Jun  6 14:16:10.172: INFO: (0) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 35.049735ms)
+Jun  6 14:16:10.172: INFO: (0) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 34.974882ms)
+Jun  6 14:16:10.172: INFO: (0) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 35.383635ms)
+Jun  6 14:16:10.173: INFO: (0) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 34.853183ms)
+Jun  6 14:16:10.173: INFO: (0) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: ...
+Jun  6 14:16:10.201: INFO: (1) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 13.658158ms)
+Jun  6 14:16:10.201: INFO: (1) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 14.084939ms)
+Jun  6 14:16:10.201: INFO: (1) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 13.649802ms)
+Jun  6 14:16:10.201: INFO: (1) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 14.18621ms)
+Jun  6 14:16:10.201: INFO: (1) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 14.516084ms)
+Jun  6 14:16:10.201: INFO: (1) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 13.871846ms)
+Jun  6 14:16:10.201: INFO: (1) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 14.16058ms)
+Jun  6 14:16:10.207: INFO: (2) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 5.872733ms)
+Jun  6 14:16:10.207: INFO: (2) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 5.802267ms)
+Jun  6 14:16:10.208: INFO: (2) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 6.183581ms)
+Jun  6 14:16:10.208: INFO: (2) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 6.585159ms)
+Jun  6 14:16:10.209: INFO: (2) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 6.934198ms)
+Jun  6 14:16:10.209: INFO: (2) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 6.549588ms)
+Jun  6 14:16:10.209: INFO: (2) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: ... (200; 6.749704ms)
+Jun  6 14:16:10.210: INFO: (2) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 7.887042ms)
+Jun  6 14:16:10.213: INFO: (2) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 11.226377ms)
+Jun  6 14:16:10.217: INFO: (2) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 14.062957ms)
+Jun  6 14:16:10.220: INFO: (2) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 17.449475ms)
+Jun  6 14:16:10.220: INFO: (2) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 17.451747ms)
+Jun  6 14:16:10.220: INFO: (2) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 17.466528ms)
+Jun  6 14:16:10.220: INFO: (2) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 18.295393ms)
+Jun  6 14:16:10.220: INFO: (2) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 17.526955ms)
+Jun  6 14:16:10.230: INFO: (3) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 9.002725ms)
+Jun  6 14:16:10.230: INFO: (3) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 9.411234ms)
+Jun  6 14:16:10.230: INFO: (3) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 9.543783ms)
+Jun  6 14:16:10.230: INFO: (3) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 8.89416ms)
+Jun  6 14:16:10.230: INFO: (3) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 9.821477ms)
+Jun  6 14:16:10.230: INFO: (3) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: ... (200; 11.936906ms)
+Jun  6 14:16:10.236: INFO: (3) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 14.610605ms)
+Jun  6 14:16:10.236: INFO: (3) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 14.853945ms)
+Jun  6 14:16:10.236: INFO: (3) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 14.866484ms)
+Jun  6 14:16:10.236: INFO: (3) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 14.657608ms)
+Jun  6 14:16:10.236: INFO: (3) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 14.679969ms)
+Jun  6 14:16:10.236: INFO: (3) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 14.875516ms)
+Jun  6 14:16:10.240: INFO: (4) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 3.545941ms)
+Jun  6 14:16:10.240: INFO: (4) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 4.057486ms)
+Jun  6 14:16:10.240: INFO: (4) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 4.094428ms)
+Jun  6 14:16:10.248: INFO: (4) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: test (200; 16.718231ms)
+Jun  6 14:16:10.253: INFO: (4) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 16.930272ms)
+Jun  6 14:16:10.253: INFO: (4) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 16.872509ms)
+Jun  6 14:16:10.253: INFO: (4) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 16.920445ms)
+Jun  6 14:16:10.253: INFO: (4) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 17.16755ms)
+Jun  6 14:16:10.254: INFO: (4) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 17.102748ms)
+Jun  6 14:16:10.254: INFO: (4) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 17.226479ms)
+Jun  6 14:16:10.254: INFO: (4) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 17.079815ms)
+Jun  6 14:16:10.254: INFO: (4) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 17.095361ms)
+Jun  6 14:16:10.254: INFO: (4) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 17.168885ms)
+Jun  6 14:16:10.254: INFO: (4) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 17.295096ms)
+Jun  6 14:16:10.254: INFO: (4) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 17.334939ms)
+Jun  6 14:16:10.267: INFO: (5) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 13.144836ms)
+Jun  6 14:16:10.267: INFO: (5) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 12.647805ms)
+Jun  6 14:16:10.267: INFO: (5) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 12.618095ms)
+Jun  6 14:16:10.267: INFO: (5) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 12.862402ms)
+Jun  6 14:16:10.267: INFO: (5) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 12.845182ms)
+Jun  6 14:16:10.267: INFO: (5) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 13.073256ms)
+Jun  6 14:16:10.267: INFO: (5) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: test (200; 12.708615ms)
+Jun  6 14:16:10.267: INFO: (5) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 13.130396ms)
+Jun  6 14:16:10.268: INFO: (5) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 12.732939ms)
+Jun  6 14:16:10.268: INFO: (5) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 13.29741ms)
+Jun  6 14:16:10.268: INFO: (5) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 13.043408ms)
+Jun  6 14:16:10.268: INFO: (5) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 13.485437ms)
+Jun  6 14:16:10.268: INFO: (5) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 13.070908ms)
+Jun  6 14:16:10.268: INFO: (5) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 13.497668ms)
+Jun  6 14:16:10.268: INFO: (5) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 13.245277ms)
+Jun  6 14:16:10.273: INFO: (6) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 5.13191ms)
+Jun  6 14:16:10.273: INFO: (6) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 4.417676ms)
+Jun  6 14:16:10.273: INFO: (6) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 5.675765ms)
+Jun  6 14:16:10.273: INFO: (6) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 5.559167ms)
+Jun  6 14:16:10.279: INFO: (6) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 6.584391ms)
+Jun  6 14:16:10.280: INFO: (6) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 6.968892ms)
+Jun  6 14:16:10.280: INFO: (6) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 11.036726ms)
+Jun  6 14:16:10.281: INFO: (6) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 8.068233ms)
+Jun  6 14:16:10.281: INFO: (6) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 7.87941ms)
+Jun  6 14:16:10.281: INFO: (6) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: ...
+Jun  6 14:16:10.298: INFO: (7) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 13.03015ms)
+Jun  6 14:16:10.298: INFO: (7) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 13.117188ms)
+Jun  6 14:16:10.298: INFO: (7) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: ... (200; 12.845977ms)
+Jun  6 14:16:10.298: INFO: (7) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 12.808205ms)
+Jun  6 14:16:10.298: INFO: (7) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 13.01589ms)
+Jun  6 14:16:10.298: INFO: (7) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 12.785325ms)
+Jun  6 14:16:10.298: INFO: (7) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 12.704786ms)
+Jun  6 14:16:10.298: INFO: (7) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 12.67164ms)
+Jun  6 14:16:10.298: INFO: (7) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 13.521484ms)
+Jun  6 14:16:10.298: INFO: (7) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 13.45565ms)
+Jun  6 14:16:10.298: INFO: (7) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 12.99792ms)
+Jun  6 14:16:10.299: INFO: (7) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 13.397978ms)
+Jun  6 14:16:10.302: INFO: (8) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 3.254062ms)
+Jun  6 14:16:10.314: INFO: (8) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 12.57744ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 16.583646ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 15.456379ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 16.649759ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 16.640055ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 15.527204ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 16.772031ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 16.624662ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 15.735191ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 15.708429ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 15.66896ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 15.723923ms)
+Jun  6 14:16:10.317: INFO: (8) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 15.873247ms)
+Jun  6 14:16:10.318: INFO: (8) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: ... (200; 9.984555ms)
+Jun  6 14:16:10.336: INFO: (9) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 10.15326ms)
+Jun  6 14:16:10.336: INFO: (9) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 17.606594ms)
+Jun  6 14:16:10.336: INFO: (9) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 17.670286ms)
+Jun  6 14:16:10.336: INFO: (9) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: test<... (200; 17.889756ms)
+Jun  6 14:16:10.341: INFO: (9) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 15.250532ms)
+Jun  6 14:16:10.341: INFO: (9) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 22.83836ms)
+Jun  6 14:16:10.341: INFO: (9) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 15.221062ms)
+Jun  6 14:16:10.345: INFO: (10) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 4.242142ms)
+Jun  6 14:16:10.345: INFO: (10) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 4.178ms)
+Jun  6 14:16:10.351: INFO: (10) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 9.909104ms)
+Jun  6 14:16:10.351: INFO: (10) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 9.912497ms)
+Jun  6 14:16:10.360: INFO: (10) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 18.754389ms)
+Jun  6 14:16:10.360: INFO: (10) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 18.692052ms)
+Jun  6 14:16:10.361: INFO: (10) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 19.805167ms)
+Jun  6 14:16:10.361: INFO: (10) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 19.789942ms)
+Jun  6 14:16:10.361: INFO: (10) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 19.781463ms)
+Jun  6 14:16:10.361: INFO: (10) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: ... (200; 19.85938ms)
+Jun  6 14:16:10.361: INFO: (10) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 20.072225ms)
+Jun  6 14:16:10.361: INFO: (10) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 19.819338ms)
+Jun  6 14:16:10.361: INFO: (10) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 19.990346ms)
+Jun  6 14:16:10.361: INFO: (10) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 19.979247ms)
+Jun  6 14:16:10.360: INFO: (10) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 18.952772ms)
+Jun  6 14:16:10.366: INFO: (11) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 5.167417ms)
+Jun  6 14:16:10.374: INFO: (11) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 8.653203ms)
+Jun  6 14:16:10.374: INFO: (11) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 8.750592ms)
+Jun  6 14:16:10.374: INFO: (11) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 8.033768ms)
+Jun  6 14:16:10.377: INFO: (11) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 11.937858ms)
+Jun  6 14:16:10.377: INFO: (11) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: test<... (200; 11.230086ms)
+Jun  6 14:16:10.377: INFO: (11) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 16.116641ms)
+Jun  6 14:16:10.377: INFO: (11) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 11.171207ms)
+Jun  6 14:16:10.380: INFO: (11) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 13.676434ms)
+Jun  6 14:16:10.380: INFO: (11) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 13.723189ms)
+Jun  6 14:16:10.380: INFO: (11) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 13.859341ms)
+Jun  6 14:16:10.380: INFO: (11) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 13.683896ms)
+Jun  6 14:16:10.380: INFO: (11) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 14.232633ms)
+Jun  6 14:16:10.383: INFO: (12) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 2.648941ms)
+Jun  6 14:16:10.391: INFO: (12) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 7.911944ms)
+Jun  6 14:16:10.391: INFO: (12) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: test (200; 7.909808ms)
+Jun  6 14:16:10.391: INFO: (12) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 7.871457ms)
+Jun  6 14:16:10.391: INFO: (12) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 7.907366ms)
+Jun  6 14:16:10.391: INFO: (12) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 8.058077ms)
+Jun  6 14:16:10.391: INFO: (12) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 9.021773ms)
+Jun  6 14:16:10.395: INFO: (12) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 12.122945ms)
+Jun  6 14:16:10.395: INFO: (12) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 12.47429ms)
+Jun  6 14:16:10.395: INFO: (12) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 12.216234ms)
+Jun  6 14:16:10.395: INFO: (12) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 12.207338ms)
+Jun  6 14:16:10.398: INFO: (12) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 15.148148ms)
+Jun  6 14:16:10.398: INFO: (12) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 9.537715ms)
+Jun  6 14:16:10.399: INFO: (12) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 15.842182ms)
+Jun  6 14:16:10.406: INFO: (13) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 6.989365ms)
+Jun  6 14:16:10.406: INFO: (13) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 7.096671ms)
+Jun  6 14:16:10.409: INFO: (13) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 9.716352ms)
+Jun  6 14:16:10.409: INFO: (13) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 9.124623ms)
+Jun  6 14:16:10.410: INFO: (13) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 9.85533ms)
+Jun  6 14:16:10.410: INFO: (13) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: test<... (200; 10.131153ms)
+Jun  6 14:16:10.418: INFO: (13) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 18.806296ms)
+Jun  6 14:16:10.418: INFO: (13) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 18.736892ms)
+Jun  6 14:16:10.418: INFO: (13) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 18.947019ms)
+Jun  6 14:16:10.418: INFO: (13) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 18.719283ms)
+Jun  6 14:16:10.418: INFO: (13) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 18.63055ms)
+Jun  6 14:16:10.418: INFO: (13) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 19.041496ms)
+Jun  6 14:16:10.418: INFO: (13) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 19.113975ms)
+Jun  6 14:16:10.418: INFO: (13) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 18.58168ms)
+Jun  6 14:16:10.418: INFO: (13) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 18.984456ms)
+Jun  6 14:16:10.434: INFO: (14) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: ... (200; 15.760564ms)
+Jun  6 14:16:10.434: INFO: (14) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 15.722733ms)
+Jun  6 14:16:10.434: INFO: (14) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 15.847782ms)
+Jun  6 14:16:10.458: INFO: (14) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 39.396818ms)
+Jun  6 14:16:10.458: INFO: (14) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 39.757177ms)
+Jun  6 14:16:10.459: INFO: (14) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 39.560032ms)
+Jun  6 14:16:10.459: INFO: (14) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 39.999335ms)
+Jun  6 14:16:10.459: INFO: (14) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 39.917835ms)
+Jun  6 14:16:10.459: INFO: (14) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 39.84428ms)
+Jun  6 14:16:10.459: INFO: (14) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 40.085379ms)
+Jun  6 14:16:10.459: INFO: (14) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 40.517536ms)
+Jun  6 14:16:10.459: INFO: (14) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 40.140911ms)
+Jun  6 14:16:10.459: INFO: (14) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 40.498373ms)
+Jun  6 14:16:10.459: INFO: (14) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 40.374603ms)
+Jun  6 14:16:10.471: INFO: (15) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 6.594961ms)
+Jun  6 14:16:10.471: INFO: (15) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 11.518029ms)
+Jun  6 14:16:10.471: INFO: (15) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 10.340517ms)
+Jun  6 14:16:10.472: INFO: (15) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: ... (200; 13.170995ms)
+Jun  6 14:16:10.484: INFO: (15) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 12.090309ms)
+Jun  6 14:16:10.484: INFO: (15) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 13.563781ms)
+Jun  6 14:16:10.485: INFO: (15) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 12.738329ms)
+Jun  6 14:16:10.485: INFO: (15) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 12.638802ms)
+Jun  6 14:16:10.485: INFO: (15) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 13.128512ms)
+Jun  6 14:16:10.495: INFO: (16) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 9.939265ms)
+Jun  6 14:16:10.495: INFO: (16) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 10.0325ms)
+Jun  6 14:16:10.495: INFO: (16) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 10.147228ms)
+Jun  6 14:16:10.495: INFO: (16) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 10.200929ms)
+Jun  6 14:16:10.495: INFO: (16) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 10.153337ms)
+Jun  6 14:16:10.496: INFO: (16) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 10.756753ms)
+Jun  6 14:16:10.510: INFO: (16) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 17.411023ms)
+Jun  6 14:16:10.510: INFO: (16) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 15.387973ms)
+Jun  6 14:16:10.510: INFO: (16) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 24.808999ms)
+Jun  6 14:16:10.510: INFO: (16) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 18.647788ms)
+Jun  6 14:16:10.510: INFO: (16) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 17.456139ms)
+Jun  6 14:16:10.510: INFO: (16) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 17.541209ms)
+Jun  6 14:16:10.510: INFO: (16) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 15.586801ms)
+Jun  6 14:16:10.510: INFO: (16) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 15.523584ms)
+Jun  6 14:16:10.510: INFO: (16) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 17.331369ms)
+Jun  6 14:16:10.510: INFO: (16) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: test<... (200; 10.487262ms)
+Jun  6 14:16:10.522: INFO: (17) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: test (200; 10.97219ms)
+Jun  6 14:16:10.522: INFO: (17) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 10.890827ms)
+Jun  6 14:16:10.522: INFO: (17) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 10.460136ms)
+Jun  6 14:16:10.527: INFO: (17) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 15.569548ms)
+Jun  6 14:16:10.527: INFO: (17) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 15.551289ms)
+Jun  6 14:16:10.527: INFO: (17) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 15.989969ms)
+Jun  6 14:16:10.527: INFO: (17) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 15.726672ms)
+Jun  6 14:16:10.527: INFO: (17) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 16.115208ms)
+Jun  6 14:16:10.527: INFO: (17) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 16.034073ms)
+Jun  6 14:16:10.534: INFO: (18) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 6.831386ms)
+Jun  6 14:16:10.539: INFO: (18) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: test<... (200; 11.40762ms)
+Jun  6 14:16:10.539: INFO: (18) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 11.490939ms)
+Jun  6 14:16:10.539: INFO: (18) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 11.698958ms)
+Jun  6 14:16:10.539: INFO: (18) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 11.496731ms)
+Jun  6 14:16:10.539: INFO: (18) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 11.237057ms)
+Jun  6 14:16:10.539: INFO: (18) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 11.373097ms)
+Jun  6 14:16:10.539: INFO: (18) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 11.366556ms)
+Jun  6 14:16:10.539: INFO: (18) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 11.357831ms)
+Jun  6 14:16:10.542: INFO: (18) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 14.812756ms)
+Jun  6 14:16:10.543: INFO: (18) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 15.31415ms)
+Jun  6 14:16:10.543: INFO: (18) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 15.308808ms)
+Jun  6 14:16:10.543: INFO: (18) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 15.458941ms)
+Jun  6 14:16:10.543: INFO: (18) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 15.432207ms)
+Jun  6 14:16:10.549: INFO: (19) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname2/proxy/: tls qux (200; 5.693919ms)
+Jun  6 14:16:10.551: INFO: (19) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:462/proxy/: tls qux (200; 7.577782ms)
+Jun  6 14:16:10.555: INFO: (19) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname2/proxy/: bar (200; 12.255428ms)
+Jun  6 14:16:10.556: INFO: (19) /api/v1/namespaces/proxy-5451/services/https:proxy-service-d7hv4:tlsportname1/proxy/: tls baz (200; 12.115255ms)
+Jun  6 14:16:10.556: INFO: (19) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname2/proxy/: bar (200; 12.209133ms)
+Jun  6 14:16:10.565: INFO: (19) /api/v1/namespaces/proxy-5451/services/http:proxy-service-d7hv4:portname1/proxy/: foo (200; 19.931351ms)
+Jun  6 14:16:10.565: INFO: (19) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:162/proxy/: bar (200; 20.024893ms)
+Jun  6 14:16:10.565: INFO: (19) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:160/proxy/: foo (200; 19.916056ms)
+Jun  6 14:16:10.565: INFO: (19) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:1080/proxy/: test<... (200; 19.95735ms)
+Jun  6 14:16:10.565: INFO: (19) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t:162/proxy/: bar (200; 19.9015ms)
+Jun  6 14:16:10.565: INFO: (19) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:460/proxy/: tls baz (200; 19.88967ms)
+Jun  6 14:16:10.565: INFO: (19) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:1080/proxy/: ... (200; 20.128416ms)
+Jun  6 14:16:10.565: INFO: (19) /api/v1/namespaces/proxy-5451/pods/proxy-service-d7hv4-4528t/proxy/: test (200; 20.19702ms)
+Jun  6 14:16:10.565: INFO: (19) /api/v1/namespaces/proxy-5451/services/proxy-service-d7hv4:portname1/proxy/: foo (200; 20.104873ms)
+Jun  6 14:16:10.565: INFO: (19) /api/v1/namespaces/proxy-5451/pods/http:proxy-service-d7hv4-4528t:160/proxy/: foo (200; 19.997059ms)
+Jun  6 14:16:10.578: INFO: (19) /api/v1/namespaces/proxy-5451/pods/https:proxy-service-d7hv4-4528t:443/proxy/: ...
+STEP: deleting ReplicationController proxy-service-d7hv4 in namespace proxy-5451, will wait for the garbage collector to delete the pods
+[AfterEach] version v1
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+STEP: Destroying namespace "proxy-5451" for this suite.
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:16:18.735: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-8981
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-test-volume-map-a721fb17-8865-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume configMaps
+Jun  6 14:16:18.867: INFO: Waiting up to 5m0s for pod "pod-configmaps-a72255b0-8865-11e9-b613-8a9bc7c14a19" in namespace "configmap-8981" to be "success or failure"
+Jun  6 14:16:18.873: INFO: Pod "pod-configmaps-a72255b0-8865-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.073633ms
+Jun  6 14:16:20.875: INFO: Pod "pod-configmaps-a72255b0-8865-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006426806s
+STEP: Saw pod success
+Jun  6 14:16:20.875: INFO: Pod "pod-configmaps-a72255b0-8865-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:16:20.877: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-configmaps-a72255b0-8865-11e9-b613-8a9bc7c14a19 container configmap-volume-test: 
+STEP: delete the pod
+Jun  6 14:16:20.889: INFO: Waiting for pod pod-configmaps-a72255b0-8865-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:16:20.891: INFO: Pod pod-configmaps-a72255b0-8865-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:16:20.891: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-8981" for this suite.
+Jun  6 14:16:26.900: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:16:26.966: INFO: namespace configmap-8981 deletion completed in 6.073398983s
+
+• [SLOW TEST:8.231 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSS
+------------------------------
+[sig-storage] EmptyDir wrapper volumes 
+  should not conflict [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:16:26.967: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir-wrapper
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-wrapper-9918
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not conflict [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Cleaning up the secret
+STEP: Cleaning up the configmap
+STEP: Cleaning up the pod
+[AfterEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:16:29.125: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-wrapper-9918" for this suite.
+Jun  6 14:16:35.135: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:16:35.254: INFO: namespace emptydir-wrapper-9918 deletion completed in 6.126658989s
+
+• [SLOW TEST:8.288 seconds]
+[sig-storage] EmptyDir wrapper volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  should not conflict [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Variable Expansion 
+  should allow substituting values in a container's args [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:16:35.255: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename var-expansion
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in var-expansion-6886
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow substituting values in a container's args [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test substitution in container's args
+Jun  6 14:16:35.394: INFO: Waiting up to 5m0s for pod "var-expansion-b0fbe278-8865-11e9-b613-8a9bc7c14a19" in namespace "var-expansion-6886" to be "success or failure"
+Jun  6 14:16:35.399: INFO: Pod "var-expansion-b0fbe278-8865-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.06033ms
+Jun  6 14:16:37.401: INFO: Pod "var-expansion-b0fbe278-8865-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007782336s
+STEP: Saw pod success
+Jun  6 14:16:37.401: INFO: Pod "var-expansion-b0fbe278-8865-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:16:37.404: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod var-expansion-b0fbe278-8865-11e9-b613-8a9bc7c14a19 container dapi-container: 
+STEP: delete the pod
+Jun  6 14:16:37.419: INFO: Waiting for pod var-expansion-b0fbe278-8865-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:16:37.422: INFO: Pod var-expansion-b0fbe278-8865-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:16:37.422: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "var-expansion-6886" for this suite.
+Jun  6 14:16:43.431: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:16:43.499: INFO: namespace var-expansion-6886 deletion completed in 6.075478489s
+
+• [SLOW TEST:8.245 seconds]
+[k8s.io] Variable Expansion
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should allow substituting values in a container's args [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:16:43.500: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in secrets-5363
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name secret-test-b5e4c00a-8865-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume secrets
+Jun  6 14:16:43.633: INFO: Waiting up to 5m0s for pod "pod-secrets-b5e52c54-8865-11e9-b613-8a9bc7c14a19" in namespace "secrets-5363" to be "success or failure"
+Jun  6 14:16:43.641: INFO: Pod "pod-secrets-b5e52c54-8865-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 7.028921ms
+Jun  6 14:16:45.643: INFO: Pod "pod-secrets-b5e52c54-8865-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009410068s
+STEP: Saw pod success
+Jun  6 14:16:45.643: INFO: Pod "pod-secrets-b5e52c54-8865-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:16:45.645: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-secrets-b5e52c54-8865-11e9-b613-8a9bc7c14a19 container secret-volume-test: 
+STEP: delete the pod
+Jun  6 14:16:45.661: INFO: Waiting for pod pod-secrets-b5e52c54-8865-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:16:45.663: INFO: Pod pod-secrets-b5e52c54-8865-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:16:45.663: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-5363" for this suite.
+Jun  6 14:16:51.673: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:16:51.741: INFO: namespace secrets-5363 deletion completed in 6.074828153s
+
+• [SLOW TEST:8.241 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:16:51.742: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pod-network-test-3422
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Performing setup for networking test in namespace pod-network-test-3422
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun  6 14:16:51.868: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun  6 14:17:13.922: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 100.96.2.228 8081 | grep -v '^\s*$'] Namespace:pod-network-test-3422 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 14:17:13.922: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 14:17:15.068: INFO: Found all expected endpoints: [netserver-0]
+Jun  6 14:17:15.070: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 100.96.1.68 8081 | grep -v '^\s*$'] Namespace:pod-network-test-3422 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  6 14:17:15.070: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+Jun  6 14:17:16.266: INFO: Found all expected endpoints: [netserver-1]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:17:16.267: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-3422" for this suite.
+Jun  6 14:17:38.277: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:17:38.349: INFO: namespace pod-network-test-3422 deletion completed in 22.080634097s
+
+• [SLOW TEST:46.608 seconds]
+[sig-network] Networking
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Deployment 
+  deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:17:38.350: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename deployment
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in deployment-50
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  6 14:17:38.475: INFO: Creating deployment "nginx-deployment"
+Jun  6 14:17:38.478: INFO: Waiting for observed generation 1
+Jun  6 14:17:40.498: INFO: Waiting for all required pods to come up
+Jun  6 14:17:40.501: INFO: Pod name nginx: Found 10 pods out of 10
+STEP: ensuring each pod is running
+Jun  6 14:17:44.521: INFO: Waiting for deployment "nginx-deployment" to complete
+Jun  6 14:17:44.526: INFO: Updating deployment "nginx-deployment" with a non-existent image
+Jun  6 14:17:44.593: INFO: Updating deployment nginx-deployment
+Jun  6 14:17:44.593: INFO: Waiting for observed generation 2
+Jun  6 14:17:46.644: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8
+Jun  6 14:17:46.646: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8
+Jun  6 14:17:46.648: INFO: Waiting for the first rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas
+Jun  6 14:17:46.653: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0
+Jun  6 14:17:46.653: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5
+Jun  6 14:17:46.654: INFO: Waiting for the second rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas
+Jun  6 14:17:46.657: INFO: Verifying that deployment "nginx-deployment" has minimum required number of available replicas
+Jun  6 14:17:46.657: INFO: Scaling up the deployment "nginx-deployment" from 10 to 30
+Jun  6 14:17:46.663: INFO: Updating deployment nginx-deployment
+Jun  6 14:17:46.663: INFO: Waiting for the replicasets of deployment "nginx-deployment" to have desired number of replicas
+Jun  6 14:17:46.671: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20
+Jun  6 14:17:46.680: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+Jun  6 14:17:46.753: INFO: Deployment "nginx-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment,GenerateName:,Namespace:deployment-50,SelfLink:/apis/apps/v1/namespaces/deployment-50/deployments/nginx-deployment,UID:d6965f9a-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24147,Generation:3,CreationTimestamp:2019-06-06 14:17:38 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*30,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:3,Replicas:13,UpdatedReplicas:5,AvailableReplicas:8,UnavailableReplicas:25,Conditions:[{Progressing True 2019-06-06 14:17:44 +0000 UTC 2019-06-06 14:17:38 +0000 UTC ReplicaSetUpdated ReplicaSet "nginx-deployment-5f9595f595" is progressing.} {Available False 2019-06-06 14:17:46 +0000 UTC 2019-06-06 14:17:46 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.}],ReadyReplicas:8,CollisionCount:nil,},}
+
+Jun  6 14:17:46.780: INFO: New ReplicaSet "nginx-deployment-5f9595f595" of Deployment "nginx-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595,GenerateName:,Namespace:deployment-50,SelfLink:/apis/apps/v1/namespaces/deployment-50/replicasets/nginx-deployment-5f9595f595,UID:da329219-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24138,Generation:3,CreationTimestamp:2019-06-06 14:17:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment nginx-deployment d6965f9a-8865-11e9-bdc9-0231d0af67bc 0xc002b84ba7 0xc002b84ba8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*13,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:5,FullyLabeledReplicas:5,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun  6 14:17:46.780: INFO: All old ReplicaSets of Deployment "nginx-deployment":
+Jun  6 14:17:46.780: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8,GenerateName:,Namespace:deployment-50,SelfLink:/apis/apps/v1/namespaces/deployment-50/replicasets/nginx-deployment-6f478d8d8,UID:d696cfa0-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24186,Generation:3,CreationTimestamp:2019-06-06 14:17:38 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment nginx-deployment d6965f9a-8865-11e9-bdc9-0231d0af67bc 0xc002b84c77 0xc002b84c78}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*20,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:8,FullyLabeledReplicas:8,ObservedGeneration:3,ReadyReplicas:8,AvailableReplicas:8,Conditions:[],},}
+Jun  6 14:17:46.818: INFO: Pod "nginx-deployment-5f9595f595-2946h" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-2946h,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-2946h,UID:db7eb0f8-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24182,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b85527 0xc002b85528}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b85590} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b855b0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.819: INFO: Pod "nginx-deployment-5f9595f595-2jtxj" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-2jtxj,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-2jtxj,UID:db7b45d2-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24178,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b85630 0xc002b85631}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b856a0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b856c0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.819: INFO: Pod "nginx-deployment-5f9595f595-6zq8s" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-6zq8s,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-6zq8s,UID:da4492df-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24130,Generation:0,CreationTimestamp:2019-06-06 14:17:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.2.238/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b85750 0xc002b85751}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b857c0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b857e0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC  }],Message:,Reason:,HostIP:172.16.66.200,PodIP:100.96.2.238,StartTime:2019-06-06 14:17:44 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ErrImagePull,Message:rpc error: code = Unknown desc = Error response from daemon: manifest for nginx:404 not found,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.819: INFO: Pod "nginx-deployment-5f9595f595-d987t" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-d987t,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-d987t,UID:db79540c-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24174,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b858d0 0xc002b858d1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b85940} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b85960}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:172.16.89.18,PodIP:,StartTime:2019-06-06 14:17:46 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.819: INFO: Pod "nginx-deployment-5f9595f595-ffth2" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-ffth2,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-ffth2,UID:db832e80-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24193,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b85a30 0xc002b85a31}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b85aa0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b85ac0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.819: INFO: Pod "nginx-deployment-5f9595f595-ghprw" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-ghprw,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-ghprw,UID:da463020-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24119,Generation:0,CreationTimestamp:2019-06-06 14:17:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.2.237/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b85b50 0xc002b85b51}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b85bc0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b85be0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC  }],Message:,Reason:,HostIP:172.16.66.200,PodIP:,StartTime:2019-06-06 14:17:44 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.819: INFO: Pod "nginx-deployment-5f9595f595-hmmfj" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-hmmfj,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-hmmfj,UID:da35bfa3-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24131,Generation:0,CreationTimestamp:2019-06-06 14:17:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.2.236/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b85cc0 0xc002b85cc1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b85d30} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b85d50}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC  }],Message:,Reason:,HostIP:172.16.66.200,PodIP:100.96.2.236,StartTime:2019-06-06 14:17:44 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ErrImagePull,Message:rpc error: code = Unknown desc = Error response from daemon: manifest for nginx:404 not found,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.819: INFO: Pod "nginx-deployment-5f9595f595-j5jgc" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-j5jgc,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-j5jgc,UID:da355edd-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24113,Generation:0,CreationTimestamp:2019-06-06 14:17:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.1.74/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b85e50 0xc002b85e51}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b85ec0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b85ee0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC  }],Message:,Reason:,HostIP:172.16.89.18,PodIP:100.96.1.74,StartTime:2019-06-06 14:17:44 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ErrImagePull,Message:rpc error: code = Unknown desc = Error response from daemon: manifest for nginx:404 not found,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.819: INFO: Pod "nginx-deployment-5f9595f595-l229r" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-l229r,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-l229r,UID:db7e70a2-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24179,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b85fd0 0xc002b85fd1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4e040} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4e060}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.820: INFO: Pod "nginx-deployment-5f9595f595-n2h49" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-n2h49,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-n2h49,UID:da3350fd-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24133,Generation:0,CreationTimestamp:2019-06-06 14:17:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.2.235/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b4e0f0 0xc002b4e0f1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4e160} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4e180}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:44 +0000 UTC  }],Message:,Reason:,HostIP:172.16.66.200,PodIP:100.96.2.235,StartTime:2019-06-06 14:17:44 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ErrImagePull,Message:rpc error: code = Unknown desc = Error response from daemon: manifest for nginx:404 not found,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.820: INFO: Pod "nginx-deployment-5f9595f595-qhxll" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-qhxll,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-qhxll,UID:db7e4b4c-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24183,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b4e270 0xc002b4e271}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4e2e0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4e300}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.820: INFO: Pod "nginx-deployment-5f9595f595-x5tfl" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-x5tfl,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-x5tfl,UID:db7be826-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24176,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b4e380 0xc002b4e381}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4e3f0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4e410}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.820: INFO: Pod "nginx-deployment-5f9595f595-z82p2" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-z82p2,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-5f9595f595-z82p2,UID:db7e9059-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24180,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 da329219-8865-11e9-bdc9-0231d0af67bc 0xc002b4e490 0xc002b4e491}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4e500} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4e520}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.820: INFO: Pod "nginx-deployment-6f478d8d8-49tz4" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-49tz4,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-49tz4,UID:db79805c-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24152,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4e5a0 0xc002b4e5a1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4e600} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4e620}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.821: INFO: Pod "nginx-deployment-6f478d8d8-4jn76" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-4jn76,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-4jn76,UID:d69c6604-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24026,Generation:0,CreationTimestamp:2019-06-06 14:17:38 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.1.72/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4e6b0 0xc002b4e6b1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4e710} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4e730}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  }],Message:,Reason:,HostIP:172.16.89.18,PodIP:100.96.1.72,StartTime:2019-06-06 14:17:38 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-06 14:17:40 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://cec429f4976c98f65848a8eac38f40b13f195b8c6ed61c96e53ae602a4d42087}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.821: INFO: Pod "nginx-deployment-6f478d8d8-6bnd8" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-6bnd8,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-6bnd8,UID:db7fce93-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24181,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4e800 0xc002b4e801}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4e860} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4e880}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.821: INFO: Pod "nginx-deployment-6f478d8d8-6c74t" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-6c74t,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-6c74t,UID:d69c86f2-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24021,Generation:0,CreationTimestamp:2019-06-06 14:17:38 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.2.232/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4e910 0xc002b4e911}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4e970} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4e990}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  }],Message:,Reason:,HostIP:172.16.66.200,PodIP:100.96.2.232,StartTime:2019-06-06 14:17:38 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-06 14:17:41 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://f0945c41b0493a6cfa241287c48266831a6ebdff4b48903a88ecba9ab00a0350}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.821: INFO: Pod "nginx-deployment-6f478d8d8-6f5w9" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-6f5w9,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-6f5w9,UID:d69aec8b-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24018,Generation:0,CreationTimestamp:2019-06-06 14:17:38 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.2.231/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4ea77 0xc002b4ea78}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4eae0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4eb00}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  }],Message:,Reason:,HostIP:172.16.66.200,PodIP:100.96.2.231,StartTime:2019-06-06 14:17:38 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-06 14:17:41 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://c869f9433fd5ae590956cf37f83779d4d183e93617647e8af7c6287b76c0edc9}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.821: INFO: Pod "nginx-deployment-6f478d8d8-6n8s8" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-6n8s8,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-6n8s8,UID:d69a62e6-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:23999,Generation:0,CreationTimestamp:2019-06-06 14:17:38 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.1.70/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4ebe7 0xc002b4ebe8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4ec50} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4ec70}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:40 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:40 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  }],Message:,Reason:,HostIP:172.16.89.18,PodIP:100.96.1.70,StartTime:2019-06-06 14:17:38 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-06 14:17:40 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://9e00508a0c4bede0fb7ccb6e12e7acf991d1820184c6475fd83f7753347318a4}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.822: INFO: Pod "nginx-deployment-6f478d8d8-bc9mt" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-bc9mt,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-bc9mt,UID:d698f9b3-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24035,Generation:0,CreationTimestamp:2019-06-06 14:17:38 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.1.69/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4ed50 0xc002b4ed51}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4edb0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4edd0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  }],Message:,Reason:,HostIP:172.16.89.18,PodIP:100.96.1.69,StartTime:2019-06-06 14:17:38 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-06 14:17:40 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://9fa430a927e1e9cc548a18d30d2c291e328d2d13ad21452467d3e518ff86bf92}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.822: INFO: Pod "nginx-deployment-6f478d8d8-br4n5" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-br4n5,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-br4n5,UID:db7c3d6b-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24173,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4eea0 0xc002b4eea1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4ef00} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4ef20}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.822: INFO: Pod "nginx-deployment-6f478d8d8-c7smk" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-c7smk,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-c7smk,UID:db81a68d-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24189,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4efa0 0xc002b4efa1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4f000} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4f020}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.822: INFO: Pod "nginx-deployment-6f478d8d8-cb8gf" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-cb8gf,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-cb8gf,UID:db7c6ad0-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24175,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4f0a0 0xc002b4f0a1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4f100} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4f120}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.824: INFO: Pod "nginx-deployment-6f478d8d8-cwrff" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-cwrff,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-cwrff,UID:db818195-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24184,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4f1a0 0xc002b4f1a1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4f200} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4f220}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.824: INFO: Pod "nginx-deployment-6f478d8d8-gzkxh" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-gzkxh,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-gzkxh,UID:db8237a5-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24187,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4f2a0 0xc002b4f2a1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4f300} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4f320}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.824: INFO: Pod "nginx-deployment-6f478d8d8-h6fwc" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-h6fwc,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-h6fwc,UID:db7c0cc4-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24172,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4f3a0 0xc002b4f3a1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4f400} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4f420}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.824: INFO: Pod "nginx-deployment-6f478d8d8-j7rz8" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-j7rz8,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-j7rz8,UID:db7834e4-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24162,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4f4a0 0xc002b4f4a1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4f500} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4f520}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:172.16.66.200,PodIP:,StartTime:2019-06-06 14:17:46 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.824: INFO: Pod "nginx-deployment-6f478d8d8-kcjdz" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-kcjdz,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-kcjdz,UID:db797a2d-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24170,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4f5e7 0xc002b4f5e8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4f650} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4f670}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.825: INFO: Pod "nginx-deployment-6f478d8d8-khb99" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-khb99,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-khb99,UID:d69dbd89-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24029,Generation:0,CreationTimestamp:2019-06-06 14:17:38 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.1.73/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4f700 0xc002b4f701}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4f760} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4f780}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  }],Message:,Reason:,HostIP:172.16.89.18,PodIP:100.96.1.73,StartTime:2019-06-06 14:17:38 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-06 14:17:40 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://937327d94b1cfa0c0593a1afbcecc2152906df87385b0f8422cca40cdd36aad2}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.827: INFO: Pod "nginx-deployment-6f478d8d8-lhslf" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-lhslf,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-lhslf,UID:d69a8a79-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24032,Generation:0,CreationTimestamp:2019-06-06 14:17:38 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.1.71/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4f860 0xc002b4f861}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4f8c0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4f8e0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:41 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  }],Message:,Reason:,HostIP:172.16.89.18,PodIP:100.96.1.71,StartTime:2019-06-06 14:17:38 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-06 14:17:40 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://a16fefc23a6115475367dc7b74b1912d9ef46ac01ec560f0e7b398dd99e7d607}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.827: INFO: Pod "nginx-deployment-6f478d8d8-r2ld5" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-r2ld5,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-r2ld5,UID:d69906fe-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24039,Generation:0,CreationTimestamp:2019-06-06 14:17:38 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{cni.projectcalico.org/podIP: 100.96.2.233/32,kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4f9c0 0xc002b4f9c1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4fa20} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4fa40}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:42 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:42 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:38 +0000 UTC  }],Message:,Reason:,HostIP:172.16.66.200,PodIP:100.96.2.233,StartTime:2019-06-06 14:17:38 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-06 14:17:41 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://feea01901fc3d892548a9c6ffa51a31954edc891fc9a3d8c8cd6e606501d7b06}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.827: INFO: Pod "nginx-deployment-6f478d8d8-tkxsm" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-tkxsm,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-tkxsm,UID:db7ba02d-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24177,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4fb17 0xc002b4fb18}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-66-200.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4fb80} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4fba0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun  6 14:17:46.828: INFO: Pod "nginx-deployment-6f478d8d8-zmzpg" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-zmzpg,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-50,SelfLink:/api/v1/namespaces/deployment-50/pods/nginx-deployment-6f478d8d8-zmzpg,UID:db8214e0-8865-11e9-bdc9-0231d0af67bc,ResourceVersion:24188,Generation:0,CreationTimestamp:2019-06-06 14:17:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{kubernetes.io/psp: e2e-test-privileged-psp,},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 d696cfa0-8865-11e9-bdc9-0231d0af67bc 0xc002b4fc20 0xc002b4fc21}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vj6tq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vj6tq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vj6tq true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-16-89-18.ec2.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002b4fc80} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002b4fca0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-06 14:17:46 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:17:46.828: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-50" for this suite.
+Jun  6 14:17:52.870: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:17:53.243: INFO: namespace deployment-50 deletion completed in 6.405282655s
+
+• [SLOW TEST:14.894 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-api-machinery] Namespaces [Serial] 
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:17:53.244: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename namespaces
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in namespaces-7332
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a test namespace
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in nsdeletetest-5928
+STEP: Waiting for a default service account to be provisioned in namespace
+STEP: Creating a pod in the namespace
+STEP: Waiting for the pod to have running status
+STEP: Deleting the namespace
+STEP: Waiting for the namespace to be removed.
+STEP: Recreating the namespace
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in nsdeletetest-5791
+STEP: Verifying there are no pods in the namespace
+[AfterEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:18:21.655: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "namespaces-7332" for this suite.
+Jun  6 14:18:27.664: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:18:27.748: INFO: namespace namespaces-7332 deletion completed in 6.091202431s
+STEP: Destroying namespace "nsdeletetest-5928" for this suite.
+Jun  6 14:18:27.750: INFO: Namespace nsdeletetest-5928 was already deleted
+STEP: Destroying namespace "nsdeletetest-5791" for this suite.
+Jun  6 14:18:33.757: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:18:33.827: INFO: namespace nsdeletetest-5791 deletion completed in 6.076370529s
+
+• [SLOW TEST:40.583 seconds]
+[sig-api-machinery] Namespaces [Serial]
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+[sig-apps] ReplicaSet 
+  should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:18:33.827: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename replicaset
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in replicaset-22
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Given a Pod with a 'name' label pod-adoption-release is created
+STEP: When a replicaset with a matching selector is created
+STEP: Then the orphan pod is adopted
+STEP: When the matched label of one of its pods change
+Jun  6 14:18:36.974: INFO: Pod name pod-adoption-release: Found 1 pods out of 1
+STEP: Then the pod is released
+[AfterEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:18:37.984: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replicaset-22" for this suite.
+Jun  6 14:18:59.994: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:19:00.146: INFO: namespace replicaset-22 deletion completed in 22.159281098s
+
+• [SLOW TEST:26.319 seconds]
+[sig-apps] ReplicaSet
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:19:00.147: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-8962
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating projection with secret that has name projected-secret-test-075eea72-8866-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume secrets
+Jun  6 14:19:00.329: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-075f5d42-8866-11e9-b613-8a9bc7c14a19" in namespace "projected-8962" to be "success or failure"
+Jun  6 14:19:00.334: INFO: Pod "pod-projected-secrets-075f5d42-8866-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.62768ms
+Jun  6 14:19:02.337: INFO: Pod "pod-projected-secrets-075f5d42-8866-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.00824084s
+STEP: Saw pod success
+Jun  6 14:19:02.337: INFO: Pod "pod-projected-secrets-075f5d42-8866-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:19:02.339: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-secrets-075f5d42-8866-11e9-b613-8a9bc7c14a19 container projected-secret-volume-test: 
+STEP: delete the pod
+Jun  6 14:19:02.356: INFO: Waiting for pod pod-projected-secrets-075f5d42-8866-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:19:02.358: INFO: Pod pod-projected-secrets-075f5d42-8866-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:19:02.358: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-8962" for this suite.
+Jun  6 14:19:08.368: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:19:08.435: INFO: namespace projected-8962 deletion completed in 6.073661449s
+
+• [SLOW TEST:8.288 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox Pod with hostAliases 
+  should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:19:08.435: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubelet-test-2671
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:19:12.581: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-2671" for this suite.
+Jun  6 14:19:58.591: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:19:58.656: INFO: namespace kubelet-test-2671 deletion completed in 46.072469083s
+
+• [SLOW TEST:50.221 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when scheduling a busybox Pod with hostAliases
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:136
+    should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:19:58.656: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename init-container
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in init-container-1697
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+Jun  6 14:19:58.797: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:20:02.663: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "init-container-1697" for this suite.
+Jun  6 14:20:08.673: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:20:08.740: INFO: namespace init-container-1697 deletion completed in 6.074341409s
+
+• [SLOW TEST:10.084 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:20:08.740: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-8614
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating projection with secret that has name projected-secret-test-303a566f-8866-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume secrets
+Jun  6 14:20:08.877: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-303ab212-8866-11e9-b613-8a9bc7c14a19" in namespace "projected-8614" to be "success or failure"
+Jun  6 14:20:08.882: INFO: Pod "pod-projected-secrets-303ab212-8866-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 4.666297ms
+Jun  6 14:20:10.884: INFO: Pod "pod-projected-secrets-303ab212-8866-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006538717s
+STEP: Saw pod success
+Jun  6 14:20:10.884: INFO: Pod "pod-projected-secrets-303ab212-8866-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:20:10.886: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-secrets-303ab212-8866-11e9-b613-8a9bc7c14a19 container projected-secret-volume-test: 
+STEP: delete the pod
+Jun  6 14:20:10.898: INFO: Waiting for pod pod-projected-secrets-303ab212-8866-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:20:10.902: INFO: Pod pod-projected-secrets-303ab212-8866-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:20:10.902: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-8614" for this suite.
+Jun  6 14:20:16.910: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:20:16.973: INFO: namespace projected-8614 deletion completed in 6.068914903s
+
+• [SLOW TEST:8.233 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:20:16.973: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in pods-2795
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: updating the pod
+Jun  6 14:20:19.674: INFO: Successfully updated pod "pod-update-3529ea2c-8866-11e9-b613-8a9bc7c14a19"
+STEP: verifying the updated pod is in kubernetes
+Jun  6 14:20:19.678: INFO: Pod update OK
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:20:19.678: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-2795" for this suite.
+Jun  6 14:20:41.686: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:20:41.750: INFO: namespace pods-2795 deletion completed in 22.0702626s
+
+• [SLOW TEST:24.777 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-node] Downward API 
+  should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:20:41.750: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-9027
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward api env vars
+Jun  6 14:20:41.880: INFO: Waiting up to 5m0s for pod "downward-api-43e6ab84-8866-11e9-b613-8a9bc7c14a19" in namespace "downward-api-9027" to be "success or failure"
+Jun  6 14:20:41.882: INFO: Pod "downward-api-43e6ab84-8866-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.325369ms
+Jun  6 14:20:43.885: INFO: Pod "downward-api-43e6ab84-8866-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.004638645s
+STEP: Saw pod success
+Jun  6 14:20:43.885: INFO: Pod "downward-api-43e6ab84-8866-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:20:43.887: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downward-api-43e6ab84-8866-11e9-b613-8a9bc7c14a19 container dapi-container: 
+STEP: delete the pod
+Jun  6 14:20:43.899: INFO: Waiting for pod downward-api-43e6ab84-8866-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:20:43.901: INFO: Pod downward-api-43e6ab84-8866-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:20:43.901: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-9027" for this suite.
+Jun  6 14:20:49.911: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:20:49.979: INFO: namespace downward-api-9027 deletion completed in 6.075106104s
+
+• [SLOW TEST:8.229 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:20:49.980: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-5224
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-test-volume-map-48da36ce-8866-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume configMaps
+Jun  6 14:20:50.194: INFO: Waiting up to 5m0s for pod "pod-configmaps-48dadd8e-8866-11e9-b613-8a9bc7c14a19" in namespace "configmap-5224" to be "success or failure"
+Jun  6 14:20:50.214: INFO: Pod "pod-configmaps-48dadd8e-8866-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.582217ms
+Jun  6 14:20:52.220: INFO: Pod "pod-configmaps-48dadd8e-8866-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.011765759s
+STEP: Saw pod success
+Jun  6 14:20:52.221: INFO: Pod "pod-configmaps-48dadd8e-8866-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:20:52.224: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-configmaps-48dadd8e-8866-11e9-b613-8a9bc7c14a19 container configmap-volume-test: 
+STEP: delete the pod
+Jun  6 14:20:52.240: INFO: Waiting for pod pod-configmaps-48dadd8e-8866-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:20:52.245: INFO: Pod pod-configmaps-48dadd8e-8866-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:20:52.245: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-5224" for this suite.
+Jun  6 14:20:58.259: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:20:58.348: INFO: namespace configmap-5224 deletion completed in 6.096222518s
+
+• [SLOW TEST:8.368 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:20:58.348: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in configmap-1967
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-test-volume-4dcb8189-8866-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume configMaps
+Jun  6 14:20:58.485: INFO: Waiting up to 5m0s for pod "pod-configmaps-4dcbe8d8-8866-11e9-b613-8a9bc7c14a19" in namespace "configmap-1967" to be "success or failure"
+Jun  6 14:20:58.488: INFO: Pod "pod-configmaps-4dcbe8d8-8866-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.856198ms
+Jun  6 14:21:00.491: INFO: Pod "pod-configmaps-4dcbe8d8-8866-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.005794299s
+STEP: Saw pod success
+Jun  6 14:21:00.491: INFO: Pod "pod-configmaps-4dcbe8d8-8866-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:21:00.493: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-configmaps-4dcbe8d8-8866-11e9-b613-8a9bc7c14a19 container configmap-volume-test: 
+STEP: delete the pod
+Jun  6 14:21:00.508: INFO: Waiting for pod pod-configmaps-4dcbe8d8-8866-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:21:00.510: INFO: Pod pod-configmaps-4dcbe8d8-8866-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:21:00.510: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-1967" for this suite.
+Jun  6 14:21:06.519: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:21:06.589: INFO: namespace configmap-1967 deletion completed in 6.077696518s
+
+• [SLOW TEST:8.242 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:21:06.590: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in downward-api-2012
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  6 14:21:06.720: INFO: Waiting up to 5m0s for pod "downwardapi-volume-52b4e5d9-8866-11e9-b613-8a9bc7c14a19" in namespace "downward-api-2012" to be "success or failure"
+Jun  6 14:21:06.725: INFO: Pod "downwardapi-volume-52b4e5d9-8866-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 5.591176ms
+Jun  6 14:21:08.728: INFO: Pod "downwardapi-volume-52b4e5d9-8866-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008294258s
+STEP: Saw pod success
+Jun  6 14:21:08.728: INFO: Pod "downwardapi-volume-52b4e5d9-8866-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:21:08.730: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod downwardapi-volume-52b4e5d9-8866-11e9-b613-8a9bc7c14a19 container client-container: 
+STEP: delete the pod
+Jun  6 14:21:08.756: INFO: Waiting for pod downwardapi-volume-52b4e5d9-8866-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:21:08.766: INFO: Pod downwardapi-volume-52b4e5d9-8866-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:21:08.766: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-2012" for this suite.
+Jun  6 14:21:14.776: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:21:14.843: INFO: namespace downward-api-2012 deletion completed in 6.075225497s
+
+• [SLOW TEST:8.254 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:21:14.844: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in emptydir-8236
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0666 on node default medium
+Jun  6 14:21:14.993: INFO: Waiting up to 5m0s for pod "pod-57a37f84-8866-11e9-b613-8a9bc7c14a19" in namespace "emptydir-8236" to be "success or failure"
+Jun  6 14:21:15.000: INFO: Pod "pod-57a37f84-8866-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.638535ms
+Jun  6 14:21:17.003: INFO: Pod "pod-57a37f84-8866-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.006338869s
+STEP: Saw pod success
+Jun  6 14:21:17.003: INFO: Pod "pod-57a37f84-8866-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:21:17.005: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-57a37f84-8866-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 14:21:17.019: INFO: Waiting for pod pod-57a37f84-8866-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:21:17.021: INFO: Pod pod-57a37f84-8866-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:21:17.021: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-8236" for this suite.
+Jun  6 14:21:23.033: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:21:23.180: INFO: namespace emptydir-8236 deletion completed in 6.155102505s
+
+• [SLOW TEST:8.336 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:21:23.181: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename containers
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in containers-4991
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test override command
+Jun  6 14:21:23.315: INFO: Waiting up to 5m0s for pod "client-containers-5c98a807-8866-11e9-b613-8a9bc7c14a19" in namespace "containers-4991" to be "success or failure"
+Jun  6 14:21:23.318: INFO: Pod "client-containers-5c98a807-8866-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.758973ms
+Jun  6 14:21:25.320: INFO: Pod "client-containers-5c98a807-8866-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.005278207s
+STEP: Saw pod success
+Jun  6 14:21:25.320: INFO: Pod "client-containers-5c98a807-8866-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:21:25.323: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod client-containers-5c98a807-8866-11e9-b613-8a9bc7c14a19 container test-container: 
+STEP: delete the pod
+Jun  6 14:21:25.335: INFO: Waiting for pod client-containers-5c98a807-8866-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:21:25.338: INFO: Pod client-containers-5c98a807-8866-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:21:25.338: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "containers-4991" for this suite.
+Jun  6 14:21:31.346: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:21:31.473: INFO: namespace containers-4991 deletion completed in 6.133579399s
+
+• [SLOW TEST:8.293 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should have monotonically increasing restart count [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:21:31.474: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in container-probe-3479
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should have monotonically increasing restart count [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod liveness-http in namespace container-probe-3479
+Jun  6 14:21:33.620: INFO: Started pod liveness-http in namespace container-probe-3479
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun  6 14:21:33.622: INFO: Initial restart count of pod liveness-http is 0
+Jun  6 14:21:43.635: INFO: Restart count of pod container-probe-3479/liveness-http is now 1 (10.013583088s elapsed)
+Jun  6 14:22:05.676: INFO: Restart count of pod container-probe-3479/liveness-http is now 2 (32.054192024s elapsed)
+Jun  6 14:22:25.722: INFO: Restart count of pod container-probe-3479/liveness-http is now 3 (52.100362513s elapsed)
+Jun  6 14:22:45.828: INFO: Restart count of pod container-probe-3479/liveness-http is now 4 (1m12.20602448s elapsed)
+Jun  6 14:23:45.907: INFO: Restart count of pod container-probe-3479/liveness-http is now 5 (2m12.284988795s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:23:45.918: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-3479" for this suite.
+Jun  6 14:23:51.929: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:23:52.002: INFO: namespace container-probe-3479 deletion completed in 6.081101647s
+
+• [SLOW TEST:140.528 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should have monotonically increasing restart count [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  should perform canary updates and phased rolling updates of template modifications [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:23:52.002: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename statefulset
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in statefulset-9268
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace statefulset-9268
+[It] should perform canary updates and phased rolling updates of template modifications [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a new StatefulSet
+Jun  6 14:23:52.152: INFO: Found 0 stateful pods, waiting for 3
+Jun  6 14:24:02.155: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true
+Jun  6 14:24:02.155: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true
+Jun  6 14:24:02.155: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Updating stateful set template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine
+Jun  6 14:24:02.178: INFO: Updating stateful set ss2
+STEP: Creating a new revision
+STEP: Not applying an update when the partition is greater than the number of replicas
+STEP: Performing a canary update
+Jun  6 14:24:12.207: INFO: Updating stateful set ss2
+Jun  6 14:24:12.215: INFO: Waiting for Pod statefulset-9268/ss2-2 to have revision ss2-c79899b9 update revision ss2-787997d666
+STEP: Restoring Pods to the correct revision when they are deleted
+Jun  6 14:24:22.260: INFO: Found 2 stateful pods, waiting for 3
+Jun  6 14:24:32.263: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true
+Jun  6 14:24:32.263: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true
+Jun  6 14:24:32.263: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Performing a phased rolling update
+Jun  6 14:24:32.282: INFO: Updating stateful set ss2
+Jun  6 14:24:32.288: INFO: Waiting for Pod statefulset-9268/ss2-1 to have revision ss2-c79899b9 update revision ss2-787997d666
+Jun  6 14:24:42.310: INFO: Updating stateful set ss2
+Jun  6 14:24:42.322: INFO: Waiting for StatefulSet statefulset-9268/ss2 to complete update
+Jun  6 14:24:42.322: INFO: Waiting for Pod statefulset-9268/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+Jun  6 14:24:52.327: INFO: Deleting all statefulset in ns statefulset-9268
+Jun  6 14:24:52.329: INFO: Scaling statefulset ss2 to 0
+Jun  6 14:25:02.340: INFO: Waiting for statefulset status.replicas updated to 0
+Jun  6 14:25:02.341: INFO: Deleting statefulset ss2
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:25:02.349: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-9268" for this suite.
+Jun  6 14:25:08.360: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:25:08.445: INFO: namespace statefulset-9268 deletion completed in 6.093069029s
+
+• [SLOW TEST:76.443 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should perform canary updates and phased rolling updates of template modifications [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-network] Services 
+  should provide secure master service  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:25:08.445: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename services
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in services-3258
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:86
+[It] should provide secure master service  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:25:08.654: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "services-3258" for this suite.
+Jun  6 14:25:14.664: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:25:14.725: INFO: namespace services-3258 deletion completed in 6.068289232s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:91
+
+• [SLOW TEST:6.279 seconds]
+[sig-network] Services
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should provide secure master service  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:25:14.725: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in secrets-1088
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name secret-test-e6a75d13-8866-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume secrets
+Jun  6 14:25:14.937: INFO: Waiting up to 5m0s for pod "pod-secrets-e6a7cb67-8866-11e9-b613-8a9bc7c14a19" in namespace "secrets-1088" to be "success or failure"
+Jun  6 14:25:14.946: INFO: Pod "pod-secrets-e6a7cb67-8866-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 9.332435ms
+Jun  6 14:25:16.949: INFO: Pod "pod-secrets-e6a7cb67-8866-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011892422s
+Jun  6 14:25:18.952: INFO: Pod "pod-secrets-e6a7cb67-8866-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.014918613s
+STEP: Saw pod success
+Jun  6 14:25:18.952: INFO: Pod "pod-secrets-e6a7cb67-8866-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:25:18.954: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-secrets-e6a7cb67-8866-11e9-b613-8a9bc7c14a19 container secret-volume-test: 
+STEP: delete the pod
+Jun  6 14:25:18.969: INFO: Waiting for pod pod-secrets-e6a7cb67-8866-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:25:18.971: INFO: Pod pod-secrets-e6a7cb67-8866-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:25:18.971: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-1088" for this suite.
+Jun  6 14:25:24.979: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:25:25.054: INFO: namespace secrets-1088 deletion completed in 6.081148115s
+
+• [SLOW TEST:10.329 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:25:25.054: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename sched-pred
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in sched-pred-7521
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79
+Jun  6 14:25:25.178: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+Jun  6 14:25:25.183: INFO: Waiting for terminating namespaces to be deleted...
+Jun  6 14:25:25.185: INFO: 
+Logging pods the kubelet thinks is on node ip-172-16-66-200.ec2.internal before test
+Jun  6 14:25:25.191: INFO: kublr-monitoring-kube-state-metrics-6fb9c7594b-zqb9d from kube-system started at 2019-06-06 12:03:51 +0000 UTC (2 container statuses recorded)
+Jun  6 14:25:25.191: INFO: 	Container addon-resizer ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: 	Container kube-state-metrics ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: k8s-api-haproxy-313ee916843387945fe68a625784d2a07122c117ee63e285821800170e69f652-ip-172-16-66-200.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 14:25:25.191: INFO: heapster-v1.6.0-beta.1-6979f49998-zrlsp from kube-system started at 2019-06-06 12:03:25 +0000 UTC (2 container statuses recorded)
+Jun  6 14:25:25.191: INFO: 	Container heapster ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: 	Container heapster-nanny ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: node-local-dns-vq5mj from kube-system started at 2019-06-06 12:02:59 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.191: INFO: 	Container node-cache ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: kublr-logging-rabbitmq-0 from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.191: INFO: 	Container rabbitmq ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: metrics-server-v0.3.1-7f597fc6fd-ljsdj from kube-system started at 2019-06-06 12:03:18 +0000 UTC (2 container statuses recorded)
+Jun  6 14:25:25.191: INFO: 	Container metrics-server ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: 	Container metrics-server-nanny ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: canal-kgff5 from kube-system started at 2019-06-06 12:02:46 +0000 UTC (3 container statuses recorded)
+Jun  6 14:25:25.191: INFO: 	Container calico-node ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: 	Container kube-flannel ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: 	Container update-network-condition ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: kube-proxy-7a09f3d398339426fb2660a3d58c4b6a781901227d4954ccce4069e834b95d61-ip-172-16-66-200.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 14:25:25.191: INFO: sonobuoy-systemd-logs-daemon-set-bc8f4f63e26f462d-d4vcf from heptio-sonobuoy started at 2019-06-06 12:54:36 +0000 UTC (2 container statuses recorded)
+Jun  6 14:25:25.191: INFO: 	Container sonobuoy-worker ready: true, restart count 1
+Jun  6 14:25:25.191: INFO: 	Container systemd-logs ready: true, restart count 1
+Jun  6 14:25:25.191: INFO: kublr-logging-fluentd-es-v2.0.2-pl5tm from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.191: INFO: 	Container fluentd-es ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: kublr-logging-rabbitmq-exporter-85b669fcb9-dv2t2 from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.191: INFO: 	Container kublr-logging-rabbitmq-exporter ready: true, restart count 0
+Jun  6 14:25:25.191: INFO: 
+Logging pods the kubelet thinks is on node ip-172-16-89-18.ec2.internal before test
+Jun  6 14:25:25.199: INFO: kublr-monitoring-prometheus-fbf8fff5b-l4hmv from kube-system started at 2019-06-06 12:03:34 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.199: INFO: 	Container prometheus ready: true, restart count 0
+Jun  6 14:25:25.199: INFO: k8s-api-haproxy-313ee916843387945fe68a625784d2a07122c117ee63e285821800170e69f652-ip-172-16-89-18.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 14:25:25.199: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-06 12:54:33 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.199: INFO: 	Container kube-sonobuoy ready: true, restart count 0
+Jun  6 14:25:25.199: INFO: canal-pszff from kube-system started at 2019-06-06 12:02:46 +0000 UTC (3 container statuses recorded)
+Jun  6 14:25:25.199: INFO: 	Container calico-node ready: true, restart count 0
+Jun  6 14:25:25.199: INFO: 	Container kube-flannel ready: true, restart count 0
+Jun  6 14:25:25.199: INFO: 	Container update-network-condition ready: true, restart count 0
+Jun  6 14:25:25.199: INFO: node-local-dns-75dpv from kube-system started at 2019-06-06 12:02:57 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.199: INFO: 	Container node-cache ready: true, restart count 0
+Jun  6 14:25:25.199: INFO: kublr-logging-fluentd-es-v2.0.2-lxzrw from kube-system started at 2019-06-06 12:03:30 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.199: INFO: 	Container fluentd-es ready: true, restart count 0
+Jun  6 14:25:25.199: INFO: kube-proxy-7a09f3d398339426fb2660a3d58c4b6a781901227d4954ccce4069e834b95d61-ip-172-16-89-18.ec2.internal from kube-system started at  (0 container statuses recorded)
+Jun  6 14:25:25.199: INFO: kube-dns-autoscaler-5d6dc48cb8-hnkfq from kube-system started at 2019-06-06 12:02:57 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.199: INFO: 	Container autoscaler ready: true, restart count 0
+Jun  6 14:25:25.199: INFO: sonobuoy-systemd-logs-daemon-set-bc8f4f63e26f462d-k864b from heptio-sonobuoy started at 2019-06-06 12:54:36 +0000 UTC (2 container statuses recorded)
+Jun  6 14:25:25.199: INFO: 	Container sonobuoy-worker ready: true, restart count 1
+Jun  6 14:25:25.199: INFO: 	Container systemd-logs ready: true, restart count 1
+Jun  6 14:25:25.199: INFO: kubernetes-dashboard-57c67b4666-9j6pn from kube-system started at 2019-06-06 12:02:57 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.199: INFO: 	Container kubernetes-dashboard ready: true, restart count 0
+Jun  6 14:25:25.199: INFO: coredns-fb8b8dccf-w9d7l from kube-system started at 2019-06-06 12:03:06 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.199: INFO: 	Container coredns ready: true, restart count 0
+Jun  6 14:25:25.199: INFO: kublr-system-shell-84d985ff44-nwqdk from kube-system started at 2019-06-06 12:03:29 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.199: INFO: 	Container shell ready: true, restart count 0
+Jun  6 14:25:25.199: INFO: tiller-deploy-89688d99f-c4mp7 from kube-system started at 2019-06-06 12:02:58 +0000 UTC (1 container statuses recorded)
+Jun  6 14:25:25.199: INFO: 	Container tiller ready: true, restart count 0
+[It] validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Trying to schedule Pod with nonempty NodeSelector.
+STEP: Considering event: 
+Type = [Warning], Name = [restricted-pod.15a5a224dfff4faa], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 node(s) didn't match node selector.]
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:25:26.218: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "sched-pred-7521" for this suite.
+Jun  6 14:25:32.230: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:25:32.297: INFO: namespace sched-pred-7521 deletion completed in 6.07647927s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70
+
+• [SLOW TEST:7.243 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22
+  validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] HostPath 
+  should give a volume the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] HostPath
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:25:32.297: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename hostpath
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in hostpath-5427
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] HostPath
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:37
+[It] should give a volume the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test hostPath mode
+Jun  6 14:25:32.430: INFO: Waiting up to 5m0s for pod "pod-host-path-test" in namespace "hostpath-5427" to be "success or failure"
+Jun  6 14:25:32.436: INFO: Pod "pod-host-path-test": Phase="Pending", Reason="", readiness=false. Elapsed: 4.620849ms
+Jun  6 14:25:34.439: INFO: Pod "pod-host-path-test": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007470398s
+STEP: Saw pod success
+Jun  6 14:25:34.439: INFO: Pod "pod-host-path-test" satisfied condition "success or failure"
+Jun  6 14:25:34.441: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-host-path-test container test-container-1: 
+STEP: delete the pod
+Jun  6 14:25:34.456: INFO: Waiting for pod pod-host-path-test to disappear
+Jun  6 14:25:34.457: INFO: Pod pod-host-path-test no longer exists
+[AfterEach] [sig-storage] HostPath
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:25:34.457: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "hostpath-5427" for this suite.
+Jun  6 14:25:40.468: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:25:40.536: INFO: namespace hostpath-5427 deletion completed in 6.076260349s
+
+• [SLOW TEST:8.239 seconds]
+[sig-storage] HostPath
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:34
+  should give a volume the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:25:40.536: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in projected-440
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name projected-configmap-test-volume-map-f5fe16eb-8866-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume configMaps
+Jun  6 14:25:40.671: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-f5fe7df6-8866-11e9-b613-8a9bc7c14a19" in namespace "projected-440" to be "success or failure"
+Jun  6 14:25:40.677: INFO: Pod "pod-projected-configmaps-f5fe7df6-8866-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 6.054349ms
+Jun  6 14:25:42.680: INFO: Pod "pod-projected-configmaps-f5fe7df6-8866-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008758502s
+STEP: Saw pod success
+Jun  6 14:25:42.680: INFO: Pod "pod-projected-configmaps-f5fe7df6-8866-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:25:42.682: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-projected-configmaps-f5fe7df6-8866-11e9-b613-8a9bc7c14a19 container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun  6 14:25:42.695: INFO: Waiting for pod pod-projected-configmaps-f5fe7df6-8866-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:25:42.697: INFO: Pod pod-projected-configmaps-f5fe7df6-8866-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:25:42.697: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-440" for this suite.
+Jun  6 14:25:48.715: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:25:48.846: INFO: namespace projected-440 deletion completed in 6.147183786s
+
+• [SLOW TEST:8.310 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl expose 
+  should create services for rc  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:25:48.846: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in kubectl-7589
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should create services for rc  [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating Redis RC
+Jun  6 14:25:48.976: INFO: namespace kubectl-7589
+Jun  6 14:25:48.976: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 create -f - --namespace=kubectl-7589'
+Jun  6 14:25:49.319: INFO: stderr: ""
+Jun  6 14:25:49.319: INFO: stdout: "replicationcontroller/redis-master created\n"
+STEP: Waiting for Redis master to start.
+Jun  6 14:25:50.321: INFO: Selector matched 1 pods for map[app:redis]
+Jun  6 14:25:50.321: INFO: Found 0 / 1
+Jun  6 14:25:51.322: INFO: Selector matched 1 pods for map[app:redis]
+Jun  6 14:25:51.322: INFO: Found 0 / 1
+Jun  6 14:25:52.321: INFO: Selector matched 1 pods for map[app:redis]
+Jun  6 14:25:52.321: INFO: Found 1 / 1
+Jun  6 14:25:52.321: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+Jun  6 14:25:52.324: INFO: Selector matched 1 pods for map[app:redis]
+Jun  6 14:25:52.324: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+Jun  6 14:25:52.324: INFO: wait on redis-master startup in kubectl-7589 
+Jun  6 14:25:52.324: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 logs redis-master-pnm9p redis-master --namespace=kubectl-7589'
+Jun  6 14:25:52.396: INFO: stderr: ""
+Jun  6 14:25:52.396: INFO: stdout: "                _._                                                  \n           _.-``__ ''-._                                             \n      _.-``    `.  `_.  ''-._           Redis 3.2.12 (35a5711f/0) 64 bit\n  .-`` .-```.  ```\\/    _.,_ ''-._                                   \n (    '      ,       .-`  | `,    )     Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'|     Port: 6379\n |    `-._   `._    /     _.-'    |     PID: 1\n  `-._    `-._  `-./  _.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |           http://redis.io        \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |                                  \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n      `-._    `-.__.-'    _.-'                                       \n          `-._        _.-'                                           \n              `-.__.-'                                               \n\n1:M 06 Jun 14:25:50.522 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 06 Jun 14:25:50.522 # Server started, Redis version 3.2.12\n1:M 06 Jun 14:25:50.522 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 06 Jun 14:25:50.522 * The server is now ready to accept connections on port 6379\n"
+STEP: exposing RC
+Jun  6 14:25:52.396: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 expose rc redis-master --name=rm2 --port=1234 --target-port=6379 --namespace=kubectl-7589'
+Jun  6 14:25:52.500: INFO: stderr: ""
+Jun  6 14:25:52.500: INFO: stdout: "service/rm2 exposed\n"
+Jun  6 14:25:52.503: INFO: Service rm2 in namespace kubectl-7589 found.
+STEP: exposing service
+Jun  6 14:25:54.507: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-041581163 expose service rm2 --name=rm3 --port=2345 --target-port=6379 --namespace=kubectl-7589'
+Jun  6 14:25:54.590: INFO: stderr: ""
+Jun  6 14:25:54.590: INFO: stdout: "service/rm3 exposed\n"
+Jun  6 14:25:54.592: INFO: Service rm3 in namespace kubectl-7589 found.
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:25:56.596: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-7589" for this suite.
+Jun  6 14:26:18.612: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:26:18.769: INFO: namespace kubectl-7589 deletion completed in 22.170508019s
+
+• [SLOW TEST:29.923 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl expose
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should create services for rc  [Conformance]
+    /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  6 14:26:18.769: INFO: >>> kubeConfig: /tmp/kubeconfig-041581163
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in secrets-6258
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in secret-namespace-1903
+STEP: Creating secret with name secret-test-0cc7fa1b-8867-11e9-b613-8a9bc7c14a19
+STEP: Creating a pod to test consume secrets
+Jun  6 14:26:19.077: INFO: Waiting up to 5m0s for pod "pod-secrets-0ce2f40c-8867-11e9-b613-8a9bc7c14a19" in namespace "secrets-6258" to be "success or failure"
+Jun  6 14:26:19.080: INFO: Pod "pod-secrets-0ce2f40c-8867-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 3.139825ms
+Jun  6 14:26:21.083: INFO: Pod "pod-secrets-0ce2f40c-8867-11e9-b613-8a9bc7c14a19": Phase="Pending", Reason="", readiness=false. Elapsed: 2.005587331s
+Jun  6 14:26:23.088: INFO: Pod "pod-secrets-0ce2f40c-8867-11e9-b613-8a9bc7c14a19": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.011365597s
+STEP: Saw pod success
+Jun  6 14:26:23.088: INFO: Pod "pod-secrets-0ce2f40c-8867-11e9-b613-8a9bc7c14a19" satisfied condition "success or failure"
+Jun  6 14:26:23.095: INFO: Trying to get logs from node ip-172-16-66-200.ec2.internal pod pod-secrets-0ce2f40c-8867-11e9-b613-8a9bc7c14a19 container secret-volume-test: 
+STEP: delete the pod
+Jun  6 14:26:23.109: INFO: Waiting for pod pod-secrets-0ce2f40c-8867-11e9-b613-8a9bc7c14a19 to disappear
+Jun  6 14:26:23.111: INFO: Pod pod-secrets-0ce2f40c-8867-11e9-b613-8a9bc7c14a19 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  6 14:26:23.111: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-6258" for this suite.
+Jun  6 14:26:29.119: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:26:29.195: INFO: namespace secrets-6258 deletion completed in 6.082287097s
+STEP: Destroying namespace "secret-namespace-1903" for this suite.
+Jun  6 14:26:35.220: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  6 14:26:35.303: INFO: namespace secret-namespace-1903 deletion completed in 6.108107963s
+
+• [SLOW TEST:16.534 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.2-beta.0.85+66049e3b21efe1/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSJun  6 14:26:35.303: INFO: Running AfterSuite actions on all nodes
+Jun  6 14:26:35.304: INFO: Running AfterSuite actions on node 1
+Jun  6 14:26:35.304: INFO: Skipping dumping logs from cluster
+
+Ran 204 of 3585 Specs in 5502.866 seconds
+SUCCESS! -- 204 Passed | 0 Failed | 0 Pending | 3381 Skipped PASS
+
+Ginkgo ran 1 suite in 1h31m44.398609382s
+Test Suite Passed
diff --git a/v1.14/kublr/junit_01.xml b/v1.14/kublr/junit_01.xml
new file mode 100644
index 0000000000..2154581f98
--- /dev/null
+++ b/v1.14/kublr/junit_01.xml
@@ -0,0 +1,10350 @@
+<!-- junit_01.xml: JUnit XML conformance report (10,350 lines); element markup and test data were stripped during extraction and are not recoverable -->
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+  
\ No newline at end of file
diff --git a/v1.14/kublr/sonobuoy.tar.gz b/v1.14/kublr/sonobuoy.tar.gz
new file mode 100644
index 0000000000..3e5012e5d5
Binary files /dev/null and b/v1.14/kublr/sonobuoy.tar.gz differ