e2e: fix tests that are broken because of the image prepull pod
Many tests expect all kube-system pods to be running and ready. The newly
added image prepull add-on pod can be in the "succeeded" state. This commit
fixes the tests to accept kube-system pods that have succeeded.
yujuhong committed May 25, 2016
1 parent 025b017 commit cd82c54
Showing 5 changed files with 36 additions and 14 deletions.
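
In practice, the change treats a pod whose phase is "succeeded" the same as one that is running and ready. A minimal sketch of how a caller might use the new helper introduced in util.go below (the client variable, pod names, and timeout are illustrative, not taken from this commit):

// Hypothetical call site: tolerate the image prepull pod having run to completion.
podNames := []string{"kube-proxy-node-1", "e2e-image-puller-node-1"} // assumed names
if !framework.CheckPodsRunningReadyOrSucceeded(c, api.NamespaceSystem, podNames, 5*time.Minute) {
	framework.Failf("kube-system pods were not running and ready, or succeeded")
}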
30 changes: 26 additions & 4 deletions test/e2e/framework/util.go
@@ -396,6 +396,14 @@ func PodRunningReady(p *api.Pod) (bool, error) {
 	return true, nil
 }
 
+func PodRunningReadyOrSucceeded(p *api.Pod) (bool, error) {
+	// Check if the phase is succeeded.
+	if p.Status.Phase == api.PodSucceeded {
+		return true, nil
+	}
+	return PodRunningReady(p)
+}
+
 // PodNotReady checks whether pod p's has a ready condition of status false.
 func PodNotReady(p *api.Pod) (bool, error) {
 	// Check the ready condition is false.
@@ -3217,16 +3225,30 @@ func GetSigner(provider string) (ssh.Signer, error) {
 	return sshutil.MakePrivateKeySignerFromFile(key)
 }
 
-// checkPodsRunning returns whether all pods whose names are listed in podNames
-// in namespace ns are running and ready, using c and waiting at most timeout.
+// CheckPodsRunningReady returns whether all pods whose names are listed in
+// podNames in namespace ns are running and ready, using c and waiting at most
+// timeout.
 func CheckPodsRunningReady(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
-	np, desc := len(podNames), "running and ready"
+	return CheckPodsCondition(c, ns, podNames, timeout, PodRunningReady, "running and ready")
+}
+
+// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
+// listed in podNames in namespace ns are running and ready, or succeeded; use
+// c and waiting at most timeout.
+func CheckPodsRunningReadyOrSucceeded(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
+	return CheckPodsCondition(c, ns, podNames, timeout, PodRunningReadyOrSucceeded, "running and ready, or succeeded")
+}
+
+// CheckPodsCondition returns whether all pods whose names are listed in podNames
+// in namespace ns are in the condition, using c and waiting at most timeout.
+func CheckPodsCondition(c *client.Client, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
+	np := len(podNames)
 	Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
 	result := make(chan bool, len(podNames))
 	for ix := range podNames {
 		// Launch off pod readiness checkers.
 		go func(name string) {
-			err := waitForPodCondition(c, ns, name, desc, timeout, PodRunningReady)
+			err := waitForPodCondition(c, ns, name, desc, timeout, condition)
 			result <- err == nil
 		}(podNames[ix])
 	}
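
The refactor also makes the polling loop reusable with any predicate. Assuming podCondition has the framework's usual func(pod *api.Pod) (bool, error) shape (implied by the waitForPodCondition call above; its definition is not part of this diff), a test could pass its own check, for example:

// Hypothetical predicate for the new CheckPodsCondition helper: also accept failed pods.
runningReadyOrTerminated := func(pod *api.Pod) (bool, error) {
	if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
		return true, nil // terminal phases count as done
	}
	return framework.PodRunningReady(pod)
}
if !framework.CheckPodsCondition(c, ns, podNames, timeout, runningReadyOrTerminated, "running and ready, or terminated") {
	framework.Failf("pods did not reach the expected state")
}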
10 changes: 5 additions & 5 deletions test/e2e/reboot.go
@@ -241,11 +241,11 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
 			podNames = append(podNames, p.ObjectMeta.Name)
 		}
 	}
-	framework.Logf("Node %s has %d pods: %v", name, len(podNames), podNames)
+	framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
 
 	// For each pod, we do a sanity check to ensure it's running / healthy
-	// now, as that's what we'll be checking later.
-	if !framework.CheckPodsRunningReady(c, ns, podNames, framework.PodReadyBeforeTimeout) {
+	// or succeeded now, as that's what we'll be checking later.
+	if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) {
 		printStatusAndLogsForNotReadyPods(c, ns, podNames, pods)
 		return false
 	}
@@ -267,8 +267,8 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
 	}
 
 	// Ensure all of the pods that we found on this node before the reboot are
-	// running / healthy.
-	if !framework.CheckPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) {
+	// running / healthy, or succeeded.
+	if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, rebootPodReadyAgainTimeout) {
 		newPods := ps.List()
 		printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods)
 		return false
2 changes: 1 addition & 1 deletion test/e2e/resize_nodes.go
@@ -396,7 +396,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 		// the cluster is restored to health.
 		By("waiting for system pods to successfully restart")
 
-		err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, map[string]string{})
+		err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, framework.ImagePullerLabels)
 		Expect(err).NotTo(HaveOccurred())
 	})

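
In resize_nodes.go (and scheduler_predicates.go below), the only change is the last argument: the empty ignore-labels map becomes framework.ImagePullerLabels, so WaitForPodsRunningReady excludes pods carrying the image puller's labels from the readiness requirement instead of failing on them. The exact contents of ImagePullerLabels are not shown in this diff; the label set below is assumed, purely to illustrate the filtering idea:

// Illustrative sketch of the ignore-labels filtering, with an assumed label set.
ignoreSelector := labels.SelectorFromSet(labels.Set{"name": "e2e-image-puller"}) // assumption
for _, pod := range pods.Items {
	if !ignoreSelector.Empty() && ignoreSelector.Matches(labels.Set(pod.Labels)) {
		continue // skip prepull pods; they may have already succeeded
	}
	// ...require the remaining pods to be running and ready, as before...
}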
6 changes: 3 additions & 3 deletions test/e2e/restart.go
@@ -82,8 +82,8 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
 			podNamesBefore[i] = p.ObjectMeta.Name
 		}
 		ns := api.NamespaceSystem
-		if !framework.CheckPodsRunningReady(f.Client, ns, podNamesBefore, framework.PodReadyBeforeTimeout) {
-			framework.Failf("At least one pod wasn't running and ready at test start.")
+		if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesBefore, framework.PodReadyBeforeTimeout) {
+			framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
 		}
 
 		By("restarting all of the nodes")
@@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
 		podNamesAfter, err := waitForNPods(ps, len(podNamesBefore), restartPodReadyAgainTimeout)
 		Expect(err).NotTo(HaveOccurred())
 		remaining := restartPodReadyAgainTimeout - time.Since(podCheckStart)
-		if !framework.CheckPodsRunningReady(f.Client, ns, podNamesAfter, remaining) {
+		if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesAfter, remaining) {
 			framework.Failf("At least one pod wasn't running and ready after the restart.")
 		}
 	})
2 changes: 1 addition & 1 deletion test/e2e/scheduler_predicates.go
@@ -196,7 +196,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 			}
 		}
 
-		err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
+		err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, framework.ImagePullerLabels)
 		Expect(err).NotTo(HaveOccurred())
 
 		for _, node := range nodeList.Items {
