diff --git a/Makefile b/Makefile index 5a81838045..297925460b 100644 --- a/Makefile +++ b/Makefile @@ -56,17 +56,11 @@ ifeq ($(GOOS), linux) endif # If $TESTFLAGS is set, it is passed as extra arguments to 'go test'. -# You can increase test output verbosity with the option '-test.vv'. -# You can select certain tests to run, with `-test.run <regex>` for example: +# You can select certain tests to run, with `-run <regex>` for example: # -# make test-unit TESTFLAGS='-test.run ^TestManifestDigest$' -# -# For integration test, we use [gocheck](https://labix.org/gocheck). -# You can increase test output verbosity with the option '-check.vv'. -# You can limit test selection with `-check.f <regex>`, for example: -# -# make test-integration TESTFLAGS='-check.f CopySuite.TestCopy.*' -export TESTFLAGS ?= -v -check.v -test.timeout=15m +# make test-unit TESTFLAGS='-run ^TestManifestDigest$' +# make test-integration TESTFLAGS='-run TestCopy/TestCopy.*' +export TESTFLAGS ?= -timeout=15m # This is assumed to be set non-empty when operating inside a CI/automation environment CI ?= diff --git a/go.mod b/go.mod index e89c7698ad..b372b0bbea 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,6 @@ require ( github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 golang.org/x/exp v0.0.0-20230206171751-46f607a40771 golang.org/x/term v0.5.0 - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/yaml.v3 v3.0.1 ) @@ -73,8 +72,6 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.15.15 // indirect github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect - github.com/kr/pretty v0.3.0 // indirect - github.com/kr/text v0.2.0 // indirect github.com/leodido/go-urn v1.2.1 // indirect github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -96,7 +93,6 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/proglottis/gpgme v0.1.3 // indirect github.com/rivo/uniseg v0.4.3 // indirect - github.com/rogpeppe/go-internal v1.8.0 // indirect github.com/russross/blackfriday v2.0.0+incompatible // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/sigstore/fulcio v1.0.0 // indirect diff --git a/integration/blocked_test.go b/integration/blocked_test.go index 1a488fdaee..f65918930b 100644 --- a/integration/blocked_test.go +++ b/integration/blocked_test.go @@ -1,34 +1,34 @@ package main -import ( - "gopkg.in/check.v1" -) - const blockedRegistriesConf = "./fixtures/blocked-registries.conf" const blockedErrorRegex = `.*registry registry-blocked.com is blocked in .*` -func (s *SkopeoSuite) TestCopyBlockedSource(c *check.C) { - assertSkopeoFails(c, blockedErrorRegex, +func (s *skopeoSuite) TestCopyBlockedSource() { + t := s.T() + assertSkopeoFails(t, blockedErrorRegex, "--registries-conf", blockedRegistriesConf, "copy", "docker://registry-blocked.com/image:test", "docker://registry-unblocked.com/image:test") } -func (s *SkopeoSuite) TestCopyBlockedDestination(c *check.C) { - assertSkopeoFails(c, blockedErrorRegex, +func (s *skopeoSuite) TestCopyBlockedDestination() { + t := s.T() + assertSkopeoFails(t, blockedErrorRegex, "--registries-conf", blockedRegistriesConf, "copy", "docker://registry-unblocked.com/image:test", "docker://registry-blocked.com/image:test") } -func (s *SkopeoSuite) TestInspectBlocked(c *check.C) { - assertSkopeoFails(c, blockedErrorRegex, +func (s *skopeoSuite) TestInspectBlocked() { + t := s.T() + assertSkopeoFails(t, blockedErrorRegex, "--registries-conf",
blockedRegistriesConf, "inspect", "docker://registry-blocked.com/image:test") } -func (s *SkopeoSuite) TestDeleteBlocked(c *check.C) { - assertSkopeoFails(c, blockedErrorRegex, +func (s *skopeoSuite) TestDeleteBlocked() { + t := s.T() + assertSkopeoFails(t, blockedErrorRegex, "--registries-conf", blockedRegistriesConf, "delete", "docker://registry-blocked.com/image:test") } diff --git a/integration/check_test.go b/integration/check_test.go index 6b6a9c64d5..76e94265ed 100644 --- a/integration/check_test.go +++ b/integration/check_test.go @@ -6,7 +6,9 @@ import ( "testing" "github.com/containers/skopeo/version" - "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" ) const ( @@ -14,95 +16,105 @@ const ( privateRegistryURL1 = "127.0.0.1:5001" ) -func Test(t *testing.T) { - check.TestingT(t) +func TestSkopeo(t *testing.T) { + suite.Run(t, &skopeoSuite{}) } -func init() { - check.Suite(&SkopeoSuite{}) -} - -type SkopeoSuite struct { +type skopeoSuite struct { + suite.Suite regV2 *testRegistryV2 regV2WithAuth *testRegistryV2 } -func (s *SkopeoSuite) SetUpSuite(c *check.C) { +var _ = suite.SetupAllSuite(&skopeoSuite{}) +var _ = suite.TearDownAllSuite(&skopeoSuite{}) + +func (s *skopeoSuite) SetupSuite() { + t := s.T() _, err := exec.LookPath(skopeoBinary) - c.Assert(err, check.IsNil) - s.regV2 = setupRegistryV2At(c, privateRegistryURL0, false, false) - s.regV2WithAuth = setupRegistryV2At(c, privateRegistryURL1, true, false) + require.NoError(t, err) + s.regV2 = setupRegistryV2At(t, privateRegistryURL0, false, false) + s.regV2WithAuth = setupRegistryV2At(t, privateRegistryURL1, true, false) } -func (s *SkopeoSuite) TearDownSuite(c *check.C) { +func (s *skopeoSuite) TearDownSuite() { + t := s.T() if s.regV2 != nil { - s.regV2.tearDown(c) + s.regV2.tearDown(t) } if s.regV2WithAuth != nil { // cmd := exec.Command("docker", "logout", s.regV2WithAuth) - // c.Assert(cmd.Run(), check.IsNil) - s.regV2WithAuth.tearDown(c) + // require.Noerror(t, cmd.Run()) + s.regV2WithAuth.tearDown(t) } } -func (s *SkopeoSuite) TestVersion(c *check.C) { - assertSkopeoSucceeds(c, fmt.Sprintf(".*%s version %s.*", skopeoBinary, version.Version), +func (s *skopeoSuite) TestVersion() { + t := s.T() + assertSkopeoSucceeds(t, fmt.Sprintf(".*%s version %s.*", skopeoBinary, version.Version), "--version") } -func (s *SkopeoSuite) TestCanAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C) { - assertSkopeoFails(c, ".*manifest unknown.*", +func (s *skopeoSuite) TestCanAuthToPrivateRegistryV2WithoutDockerCfg() { + t := s.T() + assertSkopeoFails(t, ".*manifest unknown.*", "--tls-verify=false", "inspect", "--creds="+s.regV2WithAuth.username+":"+s.regV2WithAuth.password, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url)) } -func (s *SkopeoSuite) TestNeedAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C) { - assertSkopeoFails(c, ".*authentication required.*", +func (s *skopeoSuite) TestNeedAuthToPrivateRegistryV2WithoutDockerCfg() { + t := s.T() + assertSkopeoFails(t, ".*authentication required.*", "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url)) } -func (s *SkopeoSuite) TestCertDirInsteadOfCertPath(c *check.C) { - assertSkopeoFails(c, ".*unknown flag: --cert-path.*", +func (s *skopeoSuite) TestCertDirInsteadOfCertPath() { + t := s.T() + assertSkopeoFails(t, ".*unknown flag: --cert-path.*", "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), 
"--cert-path=/") - assertSkopeoFails(c, ".*authentication required.*", + assertSkopeoFails(t, ".*authentication required.*", "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-dir=/etc/docker/certs.d/") } // TODO(runcom): as soon as we can push to registries ensure you can inspect here // not just get image not found :) -func (s *SkopeoSuite) TestNoNeedAuthToPrivateRegistryV2ImageNotFound(c *check.C) { +func (s *skopeoSuite) TestNoNeedAuthToPrivateRegistryV2ImageNotFound() { + t := s.T() out, err := exec.Command(skopeoBinary, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2.url)).CombinedOutput() - c.Assert(err, check.NotNil, check.Commentf(string(out))) - c.Assert(string(out), check.Matches, "(?s).*manifest unknown.*") // (?s) : '.' will also match newlines - c.Assert(string(out), check.Not(check.Matches), "(?s).*unauthorized: authentication required.*") // (?s) : '.' will also match newlines + assert.Error(t, err, "%s", string(out)) + assert.Regexp(t, "(?s).*manifest unknown.*", string(out)) // (?s) : '.' will also match newlines + assert.NotRegexp(t, "(?s).*unauthorized: authentication required.*", string(out)) // (?s) : '.' will also match newlines } -func (s *SkopeoSuite) TestInspectFailsWhenReferenceIsInvalid(c *check.C) { - assertSkopeoFails(c, `.*Invalid image name.*`, "inspect", "unknown") +func (s *skopeoSuite) TestInspectFailsWhenReferenceIsInvalid() { + t := s.T() + assertSkopeoFails(t, `.*Invalid image name.*`, "inspect", "unknown") } -func (s *SkopeoSuite) TestLoginLogout(c *check.C) { - assertSkopeoSucceeds(c, "^Login Succeeded!\n$", +func (s *skopeoSuite) TestLoginLogout() { + t := s.T() + assertSkopeoSucceeds(t, "^Login Succeeded!\n$", "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url) // test --get-login returns username - assertSkopeoSucceeds(c, fmt.Sprintf("^%s\n$", s.regV2WithAuth.username), + assertSkopeoSucceeds(t, fmt.Sprintf("^%s\n$", s.regV2WithAuth.username), "login", "--tls-verify=false", "--get-login", s.regV2WithAuth.url) // test logout - assertSkopeoSucceeds(c, fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url), + assertSkopeoSucceeds(t, fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url), "logout", s.regV2WithAuth.url) } -func (s *SkopeoSuite) TestCopyWithLocalAuth(c *check.C) { - assertSkopeoSucceeds(c, "^Login Succeeded!\n$", +func (s *skopeoSuite) TestCopyWithLocalAuth() { + t := s.T() + assertSkopeoSucceeds(t, "^Login Succeeded!\n$", "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url) // copy to private registry using local authentication imageName := fmt.Sprintf("docker://%s/busybox:mine", s.regV2WithAuth.url) - assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", testFQIN+":latest", imageName) + assertSkopeoSucceeds(t, "", "copy", "--dest-tls-verify=false", testFQIN+":latest", imageName) // inspect from private registry - assertSkopeoSucceeds(c, "", "inspect", "--tls-verify=false", imageName) + assertSkopeoSucceeds(t, "", "inspect", "--tls-verify=false", imageName) // logout from the registry - assertSkopeoSucceeds(c, fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url), + assertSkopeoSucceeds(t, fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url), "logout", s.regV2WithAuth.url) // inspect from private registry 
should fail after logout - assertSkopeoFails(c, ".*authentication required.*", + assertSkopeoFails(t, ".*authentication required.*", "inspect", "--tls-verify=false", imageName) } diff --git a/integration/copy_test.go b/integration/copy_test.go index 39a5cea445..1c833af799 100644 --- a/integration/copy_test.go +++ b/integration/copy_test.go @@ -13,6 +13,7 @@ import ( "os" "path/filepath" "strings" + "testing" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/signature" @@ -20,13 +21,11 @@ import ( digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/image-tools/image" - "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" ) -func init() { - check.Suite(&CopySuite{}) -} - const ( v2DockerRegistryURL = "localhost:5555" // Update also policy.json v2s1DockerRegistryURL = "localhost:5556" @@ -34,19 +33,28 @@ const ( knownListImage = "docker://registry.fedoraproject.org/fedora-minimal" // could have either ":latest" or "@sha256:..." appended ) -type CopySuite struct { +func TestCopy(t *testing.T) { + suite.Run(t, ©Suite{}) +} + +type copySuite struct { + suite.Suite cluster *openshiftCluster registry *testRegistryV2 s1Registry *testRegistryV2 gpgHome string } -func (s *CopySuite) SetUpSuite(c *check.C) { +var _ = suite.SetupAllSuite(©Suite{}) +var _ = suite.TearDownAllSuite(©Suite{}) + +func (s *copySuite) SetupSuite() { + t := s.T() if os.Getenv("SKOPEO_CONTAINER_TESTS") != "1" { - c.Skip("Not running in a container, refusing to affect user state") + t.Skip("Not running in a container, refusing to affect user state") } - s.cluster = startOpenshiftCluster(c) // FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place. + s.cluster = startOpenshiftCluster(t) // FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place. for _, stream := range []string{"unsigned", "personal", "official", "naming", "cosigned", "compression", "schema1", "schema2"} { isJSON := fmt.Sprintf(`{ @@ -57,403 +65,426 @@ func (s *CopySuite) SetUpSuite(c *check.C) { }, "spec": {} }`, stream) - runCommandWithInput(c, isJSON, "oc", "create", "-f", "-") + runCommandWithInput(t, isJSON, "oc", "create", "-f", "-") } // FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place. 
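The hunks above and below all apply the same mechanical conversion: gocheck suites become testify suites, `SetUpSuite(c *check.C)` becomes `SetupSuite()` with `t := s.T()`, `c.MkDir()` becomes `t.TempDir()`, and every `Test*` method turns into a subtest of a single registered `Test*` function. As an editorial aside, here is a minimal, self-contained sketch of that pattern; the `exampleSuite` name and its contents are illustrative only and not part of this change:

```go
package main

import (
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

// exampleSuite mirrors the shape of skopeoSuite and copySuite:
// embedding suite.Suite provides s.T(), s.Require(), etc.
type exampleSuite struct {
	suite.Suite
	workDir string
}

// Compile-time check that the suite implements the SetupSuite lifecycle hook,
// in the same style as the `var _ = suite.SetupAllSuite(...)` lines above.
var _ = suite.SetupAllSuite(&exampleSuite{})

// TestExample is the only top-level `go test` entry point; every Test* method
// on exampleSuite runs as a subtest, e.g. "TestExample/TestSomething".
func TestExample(t *testing.T) {
	suite.Run(t, &exampleSuite{})
}

// SetupSuite replaces gocheck's SetUpSuite(c *check.C); the *testing.T comes
// from s.T() instead of being passed in as an argument.
func (s *exampleSuite) SetupSuite() {
	t := s.T()
	s.workDir = t.TempDir() // replaces gocheck's c.MkDir()
}

func (s *exampleSuite) TestSomething() {
	t := s.T()
	require.DirExists(t, s.workDir)
}
```

With this layout a single method can be selected with `go test -run 'TestExample/TestSomething'`, which is why the Makefile's TESTFLAGS examples switch from gocheck's `-check.f` to plain `-run`.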
- s.registry = setupRegistryV2At(c, v2DockerRegistryURL, false, false) - s.s1Registry = setupRegistryV2At(c, v2s1DockerRegistryURL, false, true) + s.registry = setupRegistryV2At(t, v2DockerRegistryURL, false, false) + s.s1Registry = setupRegistryV2At(t, v2s1DockerRegistryURL, false, true) - s.gpgHome = c.MkDir() - os.Setenv("GNUPGHOME", s.gpgHome) + s.gpgHome = t.TempDir() + t.Setenv("GNUPGHOME", s.gpgHome) for _, key := range []string{"personal", "official"} { batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%no-protection\n%%commit\n", key, key) - runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key") + runCommandWithInput(t, batchInput, gpgBinary, "--batch", "--gen-key") - out := combinedOutputOfCommand(c, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key)) + out := combinedOutputOfCommand(t, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key)) err := os.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)), []byte(out), 0600) - c.Assert(err, check.IsNil) + require.NoError(t, err) } } -func (s *CopySuite) TearDownSuite(c *check.C) { +func (s *copySuite) TearDownSuite() { + t := s.T() if s.registry != nil { - s.registry.tearDown(c) + s.registry.tearDown(t) } if s.s1Registry != nil { - s.s1Registry.tearDown(c) + s.s1Registry.tearDown(t) } if s.cluster != nil { - s.cluster.tearDown(c) + s.cluster.tearDown(t) } } -func (s *CopySuite) TestCopyWithManifestList(c *check.C) { - dir := c.MkDir() - assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir) +func (s *copySuite) TestCopyWithManifestList() { + t := s.T() + dir := t.TempDir() + assertSkopeoSucceeds(t, "", "copy", knownListImage, "dir:"+dir) } -func (s *CopySuite) TestCopyAllWithManifestList(c *check.C) { - dir := c.MkDir() - assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "dir:"+dir) +func (s *copySuite) TestCopyAllWithManifestList() { + t := s.T() + dir := t.TempDir() + assertSkopeoSucceeds(t, "", "copy", "--all", knownListImage, "dir:"+dir) } -func (s *CopySuite) TestCopyAllWithManifestListRoundTrip(c *check.C) { - oci1 := c.MkDir() - oci2 := c.MkDir() - dir1 := c.MkDir() - dir2 := c.MkDir() - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage, "oci:"+oci1) - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1) - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "dir:"+dir1, "oci:"+oci2) - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci2, "dir:"+dir2) - assertDirImagesAreEqual(c, dir1, dir2) - out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2) - c.Assert(out, check.Equals, "") +func (s *copySuite) TestCopyAllWithManifestListRoundTrip() { + t := s.T() + oci1 := t.TempDir() + oci2 := t.TempDir() + dir1 := t.TempDir() + dir2 := t.TempDir() + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=all", knownListImage, "oci:"+oci1) + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=all", "dir:"+dir1, "oci:"+oci2) + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=all", "oci:"+oci2, "dir:"+dir2) + assertDirImagesAreEqual(t, dir1, dir2) + out := combinedOutputOfCommand(t, "diff", "-urN", oci1, oci2) + assert.Equal(t, "", out) } -func (s *CopySuite) TestCopyAllWithManifestListConverge(c *check.C) { - oci1 := c.MkDir() - oci2 := c.MkDir() - dir1 := c.MkDir() - dir2 := c.MkDir() - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", 
knownListImage, "oci:"+oci1) - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1) - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "--format", "oci", knownListImage, "dir:"+dir2) - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "dir:"+dir2, "oci:"+oci2) - assertDirImagesAreEqual(c, dir1, dir2) - out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2) - c.Assert(out, check.Equals, "") +func (s *copySuite) TestCopyAllWithManifestListConverge() { + t := s.T() + oci1 := t.TempDir() + oci2 := t.TempDir() + dir1 := t.TempDir() + dir2 := t.TempDir() + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=all", knownListImage, "oci:"+oci1) + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=all", "--format", "oci", knownListImage, "dir:"+dir2) + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=all", "dir:"+dir2, "oci:"+oci2) + assertDirImagesAreEqual(t, dir1, dir2) + out := combinedOutputOfCommand(t, "diff", "-urN", oci1, oci2) + assert.Equal(t, "", out) } -func (s *CopySuite) TestCopyNoneWithManifestList(c *check.C) { - dir1 := c.MkDir() - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=index-only", knownListImage, "dir:"+dir1) +func (s *copySuite) TestCopyNoneWithManifestList() { + t := s.T() + dir1 := t.TempDir() + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=index-only", knownListImage, "dir:"+dir1) manifestPath := filepath.Join(dir1, "manifest.json") readManifest, err := os.ReadFile(manifestPath) - c.Assert(err, check.IsNil) + require.NoError(t, err) mimeType := manifest.GuessMIMEType(readManifest) - c.Assert(mimeType, check.Equals, "application/vnd.docker.distribution.manifest.list.v2+json") - out := combinedOutputOfCommand(c, "ls", "-1", dir1) - c.Assert(out, check.Equals, "manifest.json\nversion\n") + assert.Equal(t, "application/vnd.docker.distribution.manifest.list.v2+json", mimeType) + out := combinedOutputOfCommand(t, "ls", "-1", dir1) + assert.Equal(t, "manifest.json\nversion\n", out) } -func (s *CopySuite) TestCopyWithManifestListConverge(c *check.C) { - oci1 := c.MkDir() - oci2 := c.MkDir() - dir1 := c.MkDir() - dir2 := c.MkDir() - assertSkopeoSucceeds(c, "", "copy", knownListImage, "oci:"+oci1) - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1) - assertSkopeoSucceeds(c, "", "copy", "--format", "oci", knownListImage, "dir:"+dir2) - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "dir:"+dir2, "oci:"+oci2) - assertDirImagesAreEqual(c, dir1, dir2) - out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2) - c.Assert(out, check.Equals, "") +func (s *copySuite) TestCopyWithManifestListConverge() { + t := s.T() + oci1 := t.TempDir() + oci2 := t.TempDir() + dir1 := t.TempDir() + dir2 := t.TempDir() + assertSkopeoSucceeds(t, "", "copy", knownListImage, "oci:"+oci1) + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", "--format", "oci", knownListImage, "dir:"+dir2) + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=all", "dir:"+dir2, "oci:"+oci2) + assertDirImagesAreEqual(t, dir1, dir2) + out := combinedOutputOfCommand(t, "diff", "-urN", oci1, oci2) + assert.Equal(t, "", out) } -func (s *CopySuite) TestCopyAllWithManifestListStorageFails(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopyAllWithManifestListStorageFails() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, 
storage) - assertSkopeoFails(c, `.*destination transport .* does not support copying multiple images as a group.*`, "copy", "--multi-arch=all", knownListImage, "containers-storage:"+storage+"test") + assertSkopeoFails(t, `.*destination transport .* does not support copying multiple images as a group.*`, "copy", "--multi-arch=all", knownListImage, "containers-storage:"+storage+"test") } -func (s *CopySuite) TestCopyWithManifestListStorage(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopyWithManifestListStorage() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage) - dir1 := c.MkDir() - dir2 := c.MkDir() - assertSkopeoSucceeds(c, "", "copy", knownListImage, "containers-storage:"+storage+"test") - assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir1) - assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test", "dir:"+dir2) - runDecompressDirs(c, "", dir1, dir2) - assertDirImagesAreEqual(c, dir1, dir2) + dir1 := t.TempDir() + dir2 := t.TempDir() + assertSkopeoSucceeds(t, "", "copy", knownListImage, "containers-storage:"+storage+"test") + assertSkopeoSucceeds(t, "", "copy", knownListImage, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", "containers-storage:"+storage+"test", "dir:"+dir2) + runDecompressDirs(t, "", dir1, dir2) + assertDirImagesAreEqual(t, dir1, dir2) } -func (s *CopySuite) TestCopyWithManifestListStorageMultiple(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopyWithManifestListStorageMultiple() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage) - dir1 := c.MkDir() - dir2 := c.MkDir() - assertSkopeoSucceeds(c, "", "--override-arch", "amd64", "copy", knownListImage, "containers-storage:"+storage+"test") - assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "containers-storage:"+storage+"test") - assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "dir:"+dir1) - assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test", "dir:"+dir2) - runDecompressDirs(c, "", dir1, dir2) - assertDirImagesAreEqual(c, dir1, dir2) + dir1 := t.TempDir() + dir2 := t.TempDir() + assertSkopeoSucceeds(t, "", "--override-arch", "amd64", "copy", knownListImage, "containers-storage:"+storage+"test") + assertSkopeoSucceeds(t, "", "--override-arch", "arm64", "copy", knownListImage, "containers-storage:"+storage+"test") + assertSkopeoSucceeds(t, "", "--override-arch", "arm64", "copy", knownListImage, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", "containers-storage:"+storage+"test", "dir:"+dir2) + runDecompressDirs(t, "", dir1, dir2) + assertDirImagesAreEqual(t, dir1, dir2) } -func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) { - dir1 := c.MkDir() - dir2 := c.MkDir() - oci1 := c.MkDir() - oci2 := c.MkDir() - m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage) +func (s *copySuite) TestCopyWithManifestListDigest() { + t := s.T() + dir1 := t.TempDir() + dir2 := t.TempDir() + oci1 := t.TempDir() + oci2 := t.TempDir() + m := combinedOutputOfCommand(t, skopeoBinary, "inspect", "--raw", knownListImage) manifestDigest, err := manifest.Digest([]byte(m)) - c.Assert(err, check.IsNil) + require.NoError(t, err) digest := manifestDigest.String() - assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir1) - assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage+"@"+digest, "dir:"+dir2) - 
assertSkopeoSucceeds(c, "", "copy", "dir:"+dir1, "oci:"+oci1) - assertSkopeoSucceeds(c, "", "copy", "dir:"+dir2, "oci:"+oci2) - out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2) - c.Assert(out, check.Equals, "") + assertSkopeoSucceeds(t, "", "copy", knownListImage+"@"+digest, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", "--multi-arch=all", knownListImage+"@"+digest, "dir:"+dir2) + assertSkopeoSucceeds(t, "", "copy", "dir:"+dir1, "oci:"+oci1) + assertSkopeoSucceeds(t, "", "copy", "dir:"+dir2, "oci:"+oci2) + out := combinedOutputOfCommand(t, "diff", "-urN", oci1, oci2) + assert.Equal(t, "", out) } -func (s *CopySuite) TestCopyWithDigestfileOutput(c *check.C) { - tempdir := c.MkDir() - dir1 := c.MkDir() +func (s *copySuite) TestCopyWithDigestfileOutput() { + t := s.T() + tempdir := t.TempDir() + dir1 := t.TempDir() digestOutPath := filepath.Join(tempdir, "digest.txt") - assertSkopeoSucceeds(c, "", "copy", "--digestfile="+digestOutPath, knownListImage, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", "--digestfile="+digestOutPath, knownListImage, "dir:"+dir1) readDigest, err := os.ReadFile(digestOutPath) - c.Assert(err, check.IsNil) + require.NoError(t, err) _, err = digest.Parse(string(readDigest)) - c.Assert(err, check.IsNil) + require.NoError(t, err) } -func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopyWithManifestListStorageDigest() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage) - dir1 := c.MkDir() - dir2 := c.MkDir() - m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage) + dir1 := t.TempDir() + dir2 := t.TempDir() + m := combinedOutputOfCommand(t, skopeoBinary, "inspect", "--raw", knownListImage) manifestDigest, err := manifest.Digest([]byte(m)) - c.Assert(err, check.IsNil) + require.NoError(t, err) digest := manifestDigest.String() - assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) - assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1) - assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir2) - runDecompressDirs(c, "", dir1, dir2) - assertDirImagesAreEqual(c, dir1, dir2) + assertSkopeoSucceeds(t, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) + assertSkopeoSucceeds(t, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", knownListImage+"@"+digest, "dir:"+dir2) + runDecompressDirs(t, "", dir1, dir2) + assertDirImagesAreEqual(t, dir1, dir2) } -func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArches(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopyWithManifestListStorageDigestMultipleArches() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage) - dir1 := c.MkDir() - dir2 := c.MkDir() - m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage) + dir1 := t.TempDir() + dir2 := t.TempDir() + m := combinedOutputOfCommand(t, skopeoBinary, "inspect", "--raw", knownListImage) manifestDigest, err := manifest.Digest([]byte(m)) - c.Assert(err, check.IsNil) + require.NoError(t, err) digest := manifestDigest.String() - assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) - assertSkopeoSucceeds(c, "", "copy", 
"containers-storage:"+storage+"test@"+digest, "dir:"+dir1) - assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir2) - runDecompressDirs(c, "", dir1, dir2) - assertDirImagesAreEqual(c, dir1, dir2) + assertSkopeoSucceeds(t, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) + assertSkopeoSucceeds(t, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", knownListImage+"@"+digest, "dir:"+dir2) + runDecompressDirs(t, "", dir1, dir2) + assertDirImagesAreEqual(t, dir1, dir2) } -func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseListDigest(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseListDigest() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage) - m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage) + m := combinedOutputOfCommand(t, skopeoBinary, "inspect", "--raw", knownListImage) manifestDigest, err := manifest.Digest([]byte(m)) - c.Assert(err, check.IsNil) + require.NoError(t, err) digest := manifestDigest.String() _, err = manifest.ListFromBlob([]byte(m), manifest.GuessMIMEType([]byte(m))) - c.Assert(err, check.IsNil) - assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) - assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) - assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest) - assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) - i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) + require.NoError(t, err) + assertSkopeoSucceeds(t, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) + assertSkopeoSucceeds(t, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) + assertSkopeoFails(t, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest) + assertSkopeoFails(t, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) + i2 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) var image2 imgspecv1.Image err = json.Unmarshal([]byte(i2), &image2) - c.Assert(err, check.IsNil) - c.Assert(image2.Architecture, check.Equals, "arm64") + require.NoError(t, err) + assert.Equal(t, "arm64", image2.Architecture) } -func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUsesListDigest(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUsesListDigest() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage) - m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage) + m := combinedOutputOfCommand(t, skopeoBinary, 
"inspect", "--raw", knownListImage) manifestDigest, err := manifest.Digest([]byte(m)) - c.Assert(err, check.IsNil) + require.NoError(t, err) digest := manifestDigest.String() list, err := manifest.ListFromBlob([]byte(m), manifest.GuessMIMEType([]byte(m))) - c.Assert(err, check.IsNil) + require.NoError(t, err) amd64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "amd64"}) - c.Assert(err, check.IsNil) + require.NoError(t, err) arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"}) - c.Assert(err, check.IsNil) - assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) - assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String()) - i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) + require.NoError(t, err) + assertSkopeoSucceeds(t, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) + assertSkopeoSucceeds(t, "", "--override-arch=arm64", "copy", knownListImage+"@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String()) + i1 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) var image1 imgspecv1.Image err = json.Unmarshal([]byte(i1), &image1) - c.Assert(err, check.IsNil) - c.Assert(image1.Architecture, check.Equals, "amd64") - i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String()) + require.NoError(t, err) + assert.Equal(t, "amd64", image1.Architecture) + i2 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String()) var image2 imgspecv1.Image err = json.Unmarshal([]byte(i2), &image2) - c.Assert(err, check.IsNil) - c.Assert(image2.Architecture, check.Equals, "amd64") - assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "containers-storage:"+storage+"test@"+digest) - assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) - i3 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+arm64Instance.String()) + require.NoError(t, err) + assert.Equal(t, "amd64", image2.Architecture) + assertSkopeoFails(t, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "containers-storage:"+storage+"test@"+digest) + assertSkopeoFails(t, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) + i3 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+arm64Instance.String()) var image3 imgspecv1.Image err = json.Unmarshal([]byte(i3), &image3) - c.Assert(err, check.IsNil) - c.Assert(image3.Architecture, check.Equals, "arm64") + require.NoError(t, err) + assert.Equal(t, "arm64", image3.Architecture) } -func (s *CopySuite) 
TestCopyWithManifestListStorageDigestMultipleArchesSecondUsesListDigest(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUsesListDigest() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage) - m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage) + m := combinedOutputOfCommand(t, skopeoBinary, "inspect", "--raw", knownListImage) manifestDigest, err := manifest.Digest([]byte(m)) - c.Assert(err, check.IsNil) + require.NoError(t, err) digest := manifestDigest.String() list, err := manifest.ListFromBlob([]byte(m), manifest.GuessMIMEType([]byte(m))) - c.Assert(err, check.IsNil) + require.NoError(t, err) amd64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "amd64"}) - c.Assert(err, check.IsNil) + require.NoError(t, err) arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"}) - c.Assert(err, check.IsNil) - assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String()) - assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) - i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String()) + require.NoError(t, err) + assertSkopeoSucceeds(t, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String()) + assertSkopeoSucceeds(t, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) + i1 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String()) var image1 imgspecv1.Image err = json.Unmarshal([]byte(i1), &image1) - c.Assert(err, check.IsNil) - c.Assert(image1.Architecture, check.Equals, "amd64") - assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest) - assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) - i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) + require.NoError(t, err) + assert.Equal(t, "amd64", image1.Architecture) + assertSkopeoFails(t, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest) + assertSkopeoFails(t, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) + i2 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) var image2 imgspecv1.Image err = json.Unmarshal([]byte(i2), &image2) - c.Assert(err, check.IsNil) - c.Assert(image2.Architecture, check.Equals, "arm64") - i3 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+arm64Instance.String()) + require.NoError(t, err) + assert.Equal(t, "arm64", 
image2.Architecture) + i3 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+arm64Instance.String()) var image3 imgspecv1.Image err = json.Unmarshal([]byte(i3), &image3) - c.Assert(err, check.IsNil) - c.Assert(image3.Architecture, check.Equals, "arm64") + require.NoError(t, err) + assert.Equal(t, "arm64", image3.Architecture) } -func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUsesListDigest(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUsesListDigest() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage) - m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage) + m := combinedOutputOfCommand(t, skopeoBinary, "inspect", "--raw", knownListImage) manifestDigest, err := manifest.Digest([]byte(m)) - c.Assert(err, check.IsNil) + require.NoError(t, err) digest := manifestDigest.String() list, err := manifest.ListFromBlob([]byte(m), manifest.GuessMIMEType([]byte(m))) - c.Assert(err, check.IsNil) + require.NoError(t, err) amd64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "amd64"}) - c.Assert(err, check.IsNil) + require.NoError(t, err) arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"}) - c.Assert(err, check.IsNil) - assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String()) - assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) - assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) - assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) - i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String()) + require.NoError(t, err) + assertSkopeoSucceeds(t, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String()) + assertSkopeoSucceeds(t, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) + assertSkopeoSucceeds(t, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) + assertSkopeoFails(t, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) + i1 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String()) var image1 imgspecv1.Image err = json.Unmarshal([]byte(i1), &image1) - c.Assert(err, check.IsNil) - c.Assert(image1.Architecture, check.Equals, "amd64") - i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) + require.NoError(t, err) + assert.Equal(t, "amd64", image1.Architecture) + i2 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) var image2 
imgspecv1.Image err = json.Unmarshal([]byte(i2), &image2) - c.Assert(err, check.IsNil) - c.Assert(image2.Architecture, check.Equals, "arm64") - i3 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+arm64Instance.String()) + require.NoError(t, err) + assert.Equal(t, "arm64", image2.Architecture) + i3 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+arm64Instance.String()) var image3 imgspecv1.Image err = json.Unmarshal([]byte(i3), &image3) - c.Assert(err, check.IsNil) - c.Assert(image3.Architecture, check.Equals, "arm64") + require.NoError(t, err) + assert.Equal(t, "arm64", image3.Architecture) } -func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDigest(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDigest() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage) - m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage) + m := combinedOutputOfCommand(t, skopeoBinary, "inspect", "--raw", knownListImage) manifestDigest, err := manifest.Digest([]byte(m)) - c.Assert(err, check.IsNil) + require.NoError(t, err) digest := manifestDigest.String() list, err := manifest.ListFromBlob([]byte(m), manifest.GuessMIMEType([]byte(m))) - c.Assert(err, check.IsNil) + require.NoError(t, err) amd64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "amd64"}) - c.Assert(err, check.IsNil) + require.NoError(t, err) arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"}) - c.Assert(err, check.IsNil) - assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage, "containers-storage:"+storage+"test:latest") - assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) - assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) - i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test:latest") + require.NoError(t, err) + assertSkopeoSucceeds(t, "", "--override-arch=amd64", "copy", knownListImage, "containers-storage:"+storage+"test:latest") + assertSkopeoSucceeds(t, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest) + assertSkopeoFails(t, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) + i1 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test:latest") var image1 imgspecv1.Image err = json.Unmarshal([]byte(i1), &image1) - c.Assert(err, check.IsNil) - c.Assert(image1.Architecture, check.Equals, "amd64") - i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String()) + require.NoError(t, err) + assert.Equal(t, "amd64", image1.Architecture) + i2 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String()) var image2 imgspecv1.Image err = 
json.Unmarshal([]byte(i2), &image2) - c.Assert(err, check.IsNil) - c.Assert(image2.Architecture, check.Equals, "amd64") - i3 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test:latest") + require.NoError(t, err) + assert.Equal(t, "amd64", image2.Architecture) + i3 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test:latest") var image3 imgspecv1.Image err = json.Unmarshal([]byte(i3), &image3) - c.Assert(err, check.IsNil) - c.Assert(image3.Architecture, check.Equals, "amd64") - i4 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+arm64Instance.String()) + require.NoError(t, err) + assert.Equal(t, "amd64", image3.Architecture) + i4 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+arm64Instance.String()) var image4 imgspecv1.Image err = json.Unmarshal([]byte(i4), &image4) - c.Assert(err, check.IsNil) - c.Assert(image4.Architecture, check.Equals, "arm64") - i5 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) + require.NoError(t, err) + assert.Equal(t, "arm64", image4.Architecture) + i5 := combinedOutputOfCommand(t, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest) var image5 imgspecv1.Image err = json.Unmarshal([]byte(i5), &image5) - c.Assert(err, check.IsNil) - c.Assert(image5.Architecture, check.Equals, "arm64") + require.NoError(t, err) + assert.Equal(t, "arm64", image5.Architecture) } -func (s *CopySuite) TestCopyFailsWhenImageOSDoesNotMatchRuntimeOS(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopyFailsWhenImageOSDoesNotMatchRuntimeOS() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage) - assertSkopeoFails(c, `.*no image found in manifest list for architecture .*, variant .*, OS .*`, "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test") + assertSkopeoFails(t, `.*no image found in manifest list for architecture .*, variant .*, OS .*`, "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test") } -func (s *CopySuite) TestCopySucceedsWhenImageDoesNotMatchRuntimeButWeOverride(c *check.C) { - storage := c.MkDir() +func (s *copySuite) TestCopySucceedsWhenImageDoesNotMatchRuntimeButWeOverride() { + t := s.T() + storage := t.TempDir() storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage) - assertSkopeoSucceeds(c, "", "--override-os=windows", "--override-arch=amd64", "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test") + assertSkopeoSucceeds(t, "", "--override-os=windows", "--override-arch=amd64", "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test") } -func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) { - dir1 := c.MkDir() - dir2 := c.MkDir() +func (s *copySuite) TestCopySimpleAtomicRegistry() { + t := s.T() + dir1 := t.TempDir() + dir2 := t.TempDir() // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. 
// "pull": docker: → dir: - assertSkopeoSucceeds(c, "", "copy", testFQIN64, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", testFQIN64, "dir:"+dir1) // "push": dir: → atomic: - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, "atomic:localhost:5000/myns/unsigned:unsigned") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, "atomic:localhost:5000/myns/unsigned:unsigned") // The result of pushing and pulling is an equivalent image, except for schema1 embedded names. - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:unsigned", "dir:"+dir2) - assertSchema1DirImagesAreEqualExceptNames(c, dir1, "libpod/busybox:amd64", dir2, "myns/unsigned:unsigned") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:unsigned", "dir:"+dir2) + assertSchema1DirImagesAreEqualExceptNames(t, dir1, "libpod/busybox:amd64", dir2, "myns/unsigned:unsigned") } // The most basic (skopeo copy) use: -func (s *CopySuite) TestCopySimple(c *check.C) { +func (s *copySuite) TestCopySimple() { + t := s.T() const ourRegistry = "docker://" + v2DockerRegistryURL + "/" - dir1 := c.MkDir() - dir2 := c.MkDir() + dir1 := t.TempDir() + dir2 := t.TempDir() // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. // "pull": docker: → dir: - assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause", "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", "docker://k8s.gcr.io/pause", "dir:"+dir1) // "push": dir: → docker(v2s2): - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, ourRegistry+"pause:unsigned") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, ourRegistry+"pause:unsigned") // The result of pushing and pulling is an unmodified image. 
- assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", ourRegistry+"pause:unsigned", "dir:"+dir2) - out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2) - c.Assert(out, check.Equals, "") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", ourRegistry+"pause:unsigned", "dir:"+dir2) + out := combinedOutputOfCommand(t, "diff", "-urN", dir1, dir2) + assert.Equal(t, "", out) // docker v2s2 -> OCI image layout with image name // ociDest will be created by oci: if it doesn't exist @@ -461,117 +492,118 @@ func (s *CopySuite) TestCopySimple(c *check.C) { ociDest := "pause-latest-image" ociImgName := "pause" defer os.RemoveAll(ociDest) - assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest+":"+ociImgName) + assertSkopeoSucceeds(t, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest+":"+ociImgName) _, err := os.Stat(ociDest) - c.Assert(err, check.IsNil) + require.NoError(t, err) // docker v2s2 -> OCI image layout without image name ociDest = "pause-latest-noimage" defer os.RemoveAll(ociDest) - assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest) + assertSkopeoSucceeds(t, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest) _, err = os.Stat(ociDest) - c.Assert(err, check.IsNil) + require.NoError(t, err) } -func (s *CopySuite) TestCopyEncryption(c *check.C) { - originalImageDir := c.MkDir() - encryptedImgDir := c.MkDir() - decryptedImgDir := c.MkDir() - keysDir := c.MkDir() - undecryptedImgDir := c.MkDir() - multiLayerImageDir := c.MkDir() - partiallyEncryptedImgDir := c.MkDir() - partiallyDecryptedImgDir := c.MkDir() +func (s *copySuite) TestCopyEncryption() { + t := s.T() + originalImageDir := t.TempDir() + encryptedImgDir := t.TempDir() + decryptedImgDir := t.TempDir() + keysDir := t.TempDir() + undecryptedImgDir := t.TempDir() + multiLayerImageDir := t.TempDir() + partiallyEncryptedImgDir := t.TempDir() + partiallyDecryptedImgDir := t.TempDir() // Create RSA key pair privateKey, err := rsa.GenerateKey(rand.Reader, 4096) - c.Assert(err, check.IsNil) + require.NoError(t, err) publicKey := &privateKey.PublicKey privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey) publicKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey) - c.Assert(err, check.IsNil) + require.NoError(t, err) err = os.WriteFile(keysDir+"/private.key", privateKeyBytes, 0644) - c.Assert(err, check.IsNil) + require.NoError(t, err) err = os.WriteFile(keysDir+"/public.key", publicKeyBytes, 0644) - c.Assert(err, check.IsNil) + require.NoError(t, err) // We can either perform encryption or decryption on the image. // This is why use should not be able to specify both encryption and decryption // during copy at the same time. 
- assertSkopeoFails(c, ".*--encryption-key and --decryption-key cannot be specified together.*", + assertSkopeoFails(t, ".*--encryption-key and --decryption-key cannot be specified together.*", "copy", "--encryption-key", "jwe:"+keysDir+"/public.key", "--decryption-key", keysDir+"/private.key", "oci:"+encryptedImgDir+":encrypted", "oci:"+decryptedImgDir+":decrypted") - assertSkopeoFails(c, ".*--encryption-key and --decryption-key cannot be specified together.*", + assertSkopeoFails(t, ".*--encryption-key and --decryption-key cannot be specified together.*", "copy", "--decryption-key", keysDir+"/private.key", "--encryption-key", "jwe:"+keysDir+"/public.key", "oci:"+encryptedImgDir+":encrypted", "oci:"+decryptedImgDir+":decrypted") // Copy a standard busybox image locally - assertSkopeoSucceeds(c, "", "copy", testFQIN+":1.30.1", "oci:"+originalImageDir+":latest") + assertSkopeoSucceeds(t, "", "copy", testFQIN+":1.30.1", "oci:"+originalImageDir+":latest") // Encrypt the image - assertSkopeoSucceeds(c, "", "copy", "--encryption-key", + assertSkopeoSucceeds(t, "", "copy", "--encryption-key", "jwe:"+keysDir+"/public.key", "oci:"+originalImageDir+":latest", "oci:"+encryptedImgDir+":encrypted") // An attempt to decrypt an encrypted image without a valid private key should fail invalidPrivateKey, err := rsa.GenerateKey(rand.Reader, 4096) - c.Assert(err, check.IsNil) + require.NoError(t, err) invalidPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(invalidPrivateKey) err = os.WriteFile(keysDir+"/invalid_private.key", invalidPrivateKeyBytes, 0644) - c.Assert(err, check.IsNil) - assertSkopeoFails(c, ".*no suitable key unwrapper found or none of the private keys could be used for decryption.*", + require.NoError(t, err) + assertSkopeoFails(t, ".*no suitable key unwrapper found or none of the private keys could be used for decryption.*", "copy", "--decryption-key", keysDir+"/invalid_private.key", "oci:"+encryptedImgDir+":encrypted", "oci:"+decryptedImgDir+":decrypted") // Copy encrypted image without decrypting it - assertSkopeoSucceeds(c, "", "copy", "oci:"+encryptedImgDir+":encrypted", "oci:"+undecryptedImgDir+":encrypted") + assertSkopeoSucceeds(t, "", "copy", "oci:"+encryptedImgDir+":encrypted", "oci:"+undecryptedImgDir+":encrypted") // Original busybox image has gzipped layers. 
But encrypted busybox layers should // not be of gzip type - matchLayerBlobBinaryType(c, undecryptedImgDir+"/blobs/sha256", "application/x-gzip", 0) + matchLayerBlobBinaryType(t, undecryptedImgDir+"/blobs/sha256", "application/x-gzip", 0) // Decrypt the image - assertSkopeoSucceeds(c, "", "copy", "--decryption-key", keysDir+"/private.key", + assertSkopeoSucceeds(t, "", "copy", "--decryption-key", keysDir+"/private.key", "oci:"+undecryptedImgDir+":encrypted", "oci:"+decryptedImgDir+":decrypted") // After successful decryption we should find the gzipped layer from the // busybox image - matchLayerBlobBinaryType(c, decryptedImgDir+"/blobs/sha256", "application/x-gzip", 1) + matchLayerBlobBinaryType(t, decryptedImgDir+"/blobs/sha256", "application/x-gzip", 1) // Copy a standard multi layer nginx image locally - assertSkopeoSucceeds(c, "", "copy", testFQINMultiLayer, "oci:"+multiLayerImageDir+":latest") + assertSkopeoSucceeds(t, "", "copy", testFQINMultiLayer, "oci:"+multiLayerImageDir+":latest") // Partially encrypt the image - assertSkopeoSucceeds(c, "", "copy", "--encryption-key", "jwe:"+keysDir+"/public.key", + assertSkopeoSucceeds(t, "", "copy", "--encryption-key", "jwe:"+keysDir+"/public.key", "--encrypt-layer", "1", "oci:"+multiLayerImageDir+":latest", "oci:"+partiallyEncryptedImgDir+":encrypted") // Since the image is partially encrypted we should find layers that aren't encrypted - matchLayerBlobBinaryType(c, partiallyEncryptedImgDir+"/blobs/sha256", "application/x-gzip", 2) + matchLayerBlobBinaryType(t, partiallyEncryptedImgDir+"/blobs/sha256", "application/x-gzip", 2) // Decrypt the partially encrypted image - assertSkopeoSucceeds(c, "", "copy", "--decryption-key", keysDir+"/private.key", + assertSkopeoSucceeds(t, "", "copy", "--decryption-key", keysDir+"/private.key", "oci:"+partiallyEncryptedImgDir+":encrypted", "oci:"+partiallyDecryptedImgDir+":decrypted") // After successful decryption we should find the gzipped layers from the nginx image - matchLayerBlobBinaryType(c, partiallyDecryptedImgDir+"/blobs/sha256", "application/x-gzip", 3) + matchLayerBlobBinaryType(t, partiallyDecryptedImgDir+"/blobs/sha256", "application/x-gzip", 3) } -func matchLayerBlobBinaryType(c *check.C, ociImageDirPath string, contentType string, matchCount int) { +func matchLayerBlobBinaryType(t *testing.T, ociImageDirPath string, contentType string, matchCount int) { files, err := os.ReadDir(ociImageDirPath) - c.Assert(err, check.IsNil) + require.NoError(t, err) foundCount := 0 for _, f := range files { fileContent, err := os.Open(ociImageDirPath + "/" + f.Name()) - c.Assert(err, check.IsNil) + require.NoError(t, err) layerContentType, err := getFileContentType(fileContent) - c.Assert(err, check.IsNil) + require.NoError(t, err) if layerContentType == contentType { foundCount++ } } - c.Assert(foundCount, check.Equals, matchCount) + assert.Equal(t, matchCount, foundCount) } func getFileContentType(out *os.File) (string, error) { @@ -585,63 +617,64 @@ func getFileContentType(out *os.File) (string, error) { } // Check whether dir: images in dir1 and dir2 are equal, ignoring schema1 signatures. -func assertDirImagesAreEqual(c *check.C, dir1, dir2 string) { +func assertDirImagesAreEqual(t *testing.T, dir1, dir2 string) { // The manifests may have different JWS signatures; so, compare the manifests by digests, which // strips the signatures. 
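`matchLayerBlobBinaryType` relies on a `getFileContentType` helper whose body falls outside this hunk. A plausible sketch, assuming the helper sniffs the blob with net/http content detection (which reports gzip data as "application/x-gzip", matching the expectations above); the actual implementation may differ:

```go
package main

import (
	"io"
	"net/http"
	"os"
)

// getFileContentType (sketch, assumed implementation): read the first 512
// bytes of the blob and let net/http classify them; gzip-compressed layers
// are detected as "application/x-gzip", encrypted layers are not.
func getFileContentType(out *os.File) (string, error) {
	buf := make([]byte, 512)
	n, err := out.Read(buf)
	if err != nil && err != io.EOF {
		return "", err
	}
	return http.DetectContentType(buf[:n]), nil
}
```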
digests := []digest.Digest{} for _, dir := range []string{dir1, dir2} { manifestPath := filepath.Join(dir, "manifest.json") m, err := os.ReadFile(manifestPath) - c.Assert(err, check.IsNil) + require.NoError(t, err) digest, err := manifest.Digest(m) - c.Assert(err, check.IsNil) + require.NoError(t, err) digests = append(digests, digest) } - c.Assert(digests[0], check.Equals, digests[1]) + assert.Equal(t, digests[1], digests[0]) // Then compare the rest file by file. - out := combinedOutputOfCommand(c, "diff", "-urN", "-x", "manifest.json", dir1, dir2) - c.Assert(out, check.Equals, "") + out := combinedOutputOfCommand(t, "diff", "-urN", "-x", "manifest.json", dir1, dir2) + assert.Equal(t, "", out) } // Check whether schema1 dir: images in dir1 and dir2 are equal, ignoring schema1 signatures and the embedded path/tag values, which should have the expected values. -func assertSchema1DirImagesAreEqualExceptNames(c *check.C, dir1, ref1, dir2, ref2 string) { +func assertSchema1DirImagesAreEqualExceptNames(t *testing.T, dir1, ref1, dir2, ref2 string) { // The manifests may have different JWS signatures and names; so, unmarshal and delete these elements. manifests := []map[string]any{} for dir, ref := range map[string]string{dir1: ref1, dir2: ref2} { manifestPath := filepath.Join(dir, "manifest.json") m, err := os.ReadFile(manifestPath) - c.Assert(err, check.IsNil) + require.NoError(t, err) data := map[string]any{} err = json.Unmarshal(m, &data) - c.Assert(err, check.IsNil) - c.Assert(data["schemaVersion"], check.Equals, float64(1)) + require.NoError(t, err) + assert.Equal(t, float64(1), data["schemaVersion"]) colon := strings.LastIndex(ref, ":") - c.Assert(colon, check.Not(check.Equals), -1) - c.Assert(data["name"], check.Equals, ref[:colon]) - c.Assert(data["tag"], check.Equals, ref[colon+1:]) + require.NotEqual(t, -1, colon) + assert.Equal(t, ref[:colon], data["name"]) + assert.Equal(t, ref[colon+1:], data["tag"]) for _, key := range []string{"signatures", "name", "tag"} { delete(data, key) } manifests = append(manifests, data) } - c.Assert(manifests[0], check.DeepEquals, manifests[1]) + assert.Equal(t, manifests[0], manifests[1]) // Then compare the rest file by file. - out := combinedOutputOfCommand(c, "diff", "-urN", "-x", "manifest.json", dir1, dir2) - c.Assert(out, check.Equals, "") + out := combinedOutputOfCommand(t, "diff", "-urN", "-x", "manifest.json", dir1, dir2) + assert.Equal(t, "", out) } // Streaming (skopeo copy) -func (s *CopySuite) TestCopyStreaming(c *check.C) { - dir1 := c.MkDir() - dir2 := c.MkDir() +func (s *copySuite) TestCopyStreaming() { + t := s.T() + dir1 := t.TempDir() + dir2 := t.TempDir() // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. 
// streaming: docker: → atomic: - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", testFQIN64, "atomic:localhost:5000/myns/unsigned:streaming") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--debug", "copy", testFQIN64, "atomic:localhost:5000/myns/unsigned:streaming") // Compare (copies of) the original and the copy: - assertSkopeoSucceeds(c, "", "copy", testFQIN64, "dir:"+dir1) - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:streaming", "dir:"+dir2) - assertSchema1DirImagesAreEqualExceptNames(c, dir1, "libpod/busybox:amd64", dir2, "myns/unsigned:streaming") + assertSkopeoSucceeds(t, "", "copy", testFQIN64, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:streaming", "dir:"+dir2) + assertSchema1DirImagesAreEqualExceptNames(t, dir1, "libpod/busybox:amd64", dir2, "myns/unsigned:streaming") // FIXME: Also check pushing to docker:// } @@ -650,26 +683,27 @@ func (s *CopySuite) TestCopyStreaming(c *check.C) { // benefits of a tool like skopeo is that you can use OCI tooling to create an // image and then as the final step convert the image to a non-standard format // like Docker). But this only works if we _test_ it. -func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) { +func (s *copySuite) TestCopyOCIRoundTrip() { + t := s.T() const ourRegistry = "docker://" + v2DockerRegistryURL + "/" - oci1 := c.MkDir() - oci2 := c.MkDir() + oci1 := t.TempDir() + oci2 := t.TempDir() // Docker -> OCI - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", testFQIN, "oci:"+oci1+":latest") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--debug", "copy", testFQIN, "oci:"+oci1+":latest") // OCI -> Docker - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci1+":latest", ourRegistry+"original/busybox:oci_copy") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci1+":latest", ourRegistry+"original/busybox:oci_copy") // Docker -> OCI - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", ourRegistry+"original/busybox:oci_copy", "oci:"+oci2+":latest") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--debug", "copy", ourRegistry+"original/busybox:oci_copy", "oci:"+oci2+":latest") // OCI -> Docker - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci2+":latest", ourRegistry+"original/busybox:oci_copy2") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci2+":latest", ourRegistry+"original/busybox:oci_copy2") // TODO: Add some more tags to output to and check those work properly. // First, make sure the OCI blobs are the same. This should _always_ be true. - out := combinedOutputOfCommand(c, "diff", "-urN", oci1+"/blobs", oci2+"/blobs") - c.Assert(out, check.Equals, "") + out := combinedOutputOfCommand(t, "diff", "-urN", oci1+"/blobs", oci2+"/blobs") + assert.Equal(t, "", out) // For some silly reason we pass a logger to the OCI library here... logger := log.New(os.Stderr, "", 0) @@ -678,127 +712,130 @@ func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) { // non-compliance errors. DO NOT REMOVE THIS TEST UNLESS IT'S ABSOLUTELY // NECESSARY. err := image.ValidateLayout(oci1, nil, logger) - c.Assert(err, check.IsNil) + require.NoError(t, err) err = image.ValidateLayout(oci2, nil, logger) - c.Assert(err, check.IsNil) + require.NoError(t, err) // Now verify that everything is identical. 
Currently this is true, but // because we recompute the manifests on-the-fly this doesn't necessarily // always have to be true (but if this breaks in the future __PLEASE__ make // sure that the breakage actually makes sense before removing this check). - out = combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2) - c.Assert(out, check.Equals, "") + out = combinedOutputOfCommand(t, "diff", "-urN", oci1, oci2) + assert.Equal(t, "", out) } // --sign-by and --policy copy, primarily using atomic: -func (s *CopySuite) TestCopySignatures(c *check.C) { +func (s *copySuite) TestCopySignatures() { + t := s.T() mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{}) - c.Assert(err, check.IsNil) + require.NoError(t, err) defer mech.Close() if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures - c.Skip(fmt.Sprintf("Signing not supported: %v", err)) + t.Skipf("Signing not supported: %v", err) } - dir := c.MkDir() + dir := t.TempDir() dirDest := "dir:" + dir - policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome}) + policy := fileFromFixture(t, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome}) defer os.Remove(policy) // type: reject - assertSkopeoFails(c, fmt.Sprintf(".*Source image rejected: Running image %s:latest is rejected by policy.*", testFQIN), + assertSkopeoFails(t, fmt.Sprintf(".*Source image rejected: Running image %s:latest is rejected by policy.*", testFQIN), "--policy", policy, "copy", testFQIN+":latest", dirDest) // type: insecureAcceptAnything - assertSkopeoSucceeds(c, "", "--policy", policy, "copy", "docker://quay.io/openshift/origin-hello-openshift", dirDest) + assertSkopeoSucceeds(t, "", "--policy", policy, "copy", "docker://quay.io/openshift/origin-hello-openshift", dirDest) // type: signedBy // Sign the images - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", testFQIN+":1.26", "atomic:localhost:5006/myns/personal:personal") - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "official@example.com", testFQIN+":1.26.1", "atomic:localhost:5006/myns/official:official") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", testFQIN+":1.26", "atomic:localhost:5006/myns/personal:personal") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "--sign-by", "official@example.com", testFQIN+":1.26.1", "atomic:localhost:5006/myns/official:official") // Verify that we can pull them - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/personal:personal", dirDest) - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/official:official", dirDest) + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/personal:personal", dirDest) + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/official:official", dirDest) // Verify that mis-signed images are rejected - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/personal:personal", "atomic:localhost:5006/myns/official:attack") - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/personal:attack") - assertSkopeoFails(c, ".*Source image rejected: Invalid GPG 
signature.*", + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/personal:personal", "atomic:localhost:5006/myns/official:attack") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/personal:attack") + assertSkopeoFails(t, ".*Source image rejected: Invalid GPG signature.*", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/personal:attack", dirDest) - assertSkopeoFails(c, ".*Source image rejected: Invalid GPG signature.*", + assertSkopeoFails(t, ".*Source image rejected: Invalid GPG signature.*", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/official:attack", dirDest) // Verify that signed identity is verified. - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/naming:test1") - assertSkopeoFails(c, ".*Source image rejected: Signature for identity localhost:5006/myns/official:official is not accepted.*", + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/naming:test1") + assertSkopeoFails(t, ".*Source image rejected: Signature for identity localhost:5006/myns/official:official is not accepted.*", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/naming:test1", dirDest) // signedIdentity works - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/naming:naming") - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/naming:naming", dirDest) + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/naming:naming") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/naming:naming", dirDest) // Verify that cosigning requirements are enforced - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/cosigned:cosigned") - assertSkopeoFails(c, ".*Source image rejected: Invalid GPG signature.*", + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/cosigned:cosigned") + assertSkopeoFails(t, ".*Source image rejected: Invalid GPG signature.*", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/cosigned:cosigned", dirDest) - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/cosigned:cosigned") - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/cosigned:cosigned", dirDest) + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/cosigned:cosigned") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/cosigned:cosigned", dirDest) } // --policy copy for dir: sources -func (s *CopySuite) TestCopyDirSignatures(c *check.C) { +func (s *copySuite) TestCopyDirSignatures() { + t := s.T() mech, _, err := 
signature.NewEphemeralGPGSigningMechanism([]byte{}) - c.Assert(err, check.IsNil) + require.NoError(t, err) defer mech.Close() if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures - c.Skip(fmt.Sprintf("Signing not supported: %v", err)) + t.Skipf("Signing not supported: %v", err) } - topDir := c.MkDir() + topDir := t.TempDir() topDirDest := "dir:" + topDir for _, suffix := range []string{"/dir1", "/dir2", "/restricted/personal", "/restricted/official", "/restricted/badidentity", "/dest"} { err := os.MkdirAll(topDir+suffix, 0755) - c.Assert(err, check.IsNil) + require.NoError(t, err) } // Note the "/@dirpath@": The value starts with a slash so that it is not rejected in other tests which do not replace it, // but we must ensure that the result is a canonical path, not something starting with a "//". - policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome, "/@dirpath@": topDir + "/restricted"}) + policy := fileFromFixture(t, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome, "/@dirpath@": topDir + "/restricted"}) defer os.Remove(policy) // Get some images. - assertSkopeoSucceeds(c, "", "copy", testFQIN+":armfh", topDirDest+"/dir1") - assertSkopeoSucceeds(c, "", "copy", testFQIN+":s390x", topDirDest+"/dir2") + assertSkopeoSucceeds(t, "", "copy", testFQIN+":armfh", topDirDest+"/dir1") + assertSkopeoSucceeds(t, "", "copy", testFQIN+":s390x", topDirDest+"/dir2") // Sign the images. By coping from a topDirDest/dirN, also test that non-/restricted paths // use the dir:"" default of insecureAcceptAnything. // (For signing, we must push to atomic: to get a Docker identity to use in the signature.) - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "personal@example.com", topDirDest+"/dir1", "atomic:localhost:5000/myns/personal:dirstaging") - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "official@example.com", topDirDest+"/dir2", "atomic:localhost:5000/myns/official:dirstaging") - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/personal:dirstaging", topDirDest+"/restricted/personal") - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/official:dirstaging", topDirDest+"/restricted/official") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "personal@example.com", topDirDest+"/dir1", "atomic:localhost:5000/myns/personal:dirstaging") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "official@example.com", topDirDest+"/dir2", "atomic:localhost:5000/myns/official:dirstaging") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/personal:dirstaging", topDirDest+"/restricted/personal") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/official:dirstaging", topDirDest+"/restricted/official") // type: signedBy, with a signedIdentity override (necessary because dir: identities can't be signed) // Verify that correct images are accepted - assertSkopeoSucceeds(c, "", "--policy", policy, "copy", topDirDest+"/restricted/official", topDirDest+"/dest") + assertSkopeoSucceeds(t, "", "--policy", policy, "copy", topDirDest+"/restricted/official", topDirDest+"/dest") // ... and that mis-signed images are rejected. 
- assertSkopeoFails(c, ".*Source image rejected: Invalid GPG signature.*", + assertSkopeoFails(t, ".*Source image rejected: Invalid GPG signature.*", "--policy", policy, "copy", topDirDest+"/restricted/personal", topDirDest+"/dest") // Verify that the signed identity is verified. - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "official@example.com", topDirDest+"/dir1", "atomic:localhost:5000/myns/personal:dirstaging2") - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/personal:dirstaging2", topDirDest+"/restricted/badidentity") - assertSkopeoFails(c, ".*Source image rejected: .*Signature for identity localhost:5000/myns/personal:dirstaging2 is not accepted.*", + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "official@example.com", topDirDest+"/dir1", "atomic:localhost:5000/myns/personal:dirstaging2") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/personal:dirstaging2", topDirDest+"/restricted/badidentity") + assertSkopeoFails(t, ".*Source image rejected: .*Signature for identity localhost:5000/myns/personal:dirstaging2 is not accepted.*", "--policy", policy, "copy", topDirDest+"/restricted/badidentity", topDirDest+"/dest") } // Compression during copy -func (s *CopySuite) TestCopyCompression(c *check.C) { +func (s *copySuite) TestCopyCompression() { + t := s.T() const uncompresssedLayerFile = "160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710" - topDir := c.MkDir() + topDir := t.TempDir() - for i, t := range []struct{ fixture, remote string }{ + for i, c := range []struct{ fixture, remote string }{ {"uncompressed-image-s1", "docker://" + v2DockerRegistryURL + "/compression/compression:s1"}, {"uncompressed-image-s2", "docker://" + v2DockerRegistryURL + "/compression/compression:s2"}, {"uncompressed-image-s1", "atomic:localhost:5000/myns/compression:s1"}, @@ -806,30 +843,30 @@ func (s *CopySuite) TestCopyCompression(c *check.C) { } { dir := filepath.Join(topDir, fmt.Sprintf("case%d", i)) err := os.MkdirAll(dir, 0755) - c.Assert(err, check.IsNil) + require.NoError(t, err) - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "dir:fixtures/"+t.fixture, t.remote) - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", t.remote, "dir:"+dir) + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "dir:fixtures/"+c.fixture, c.remote) + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", c.remote, "dir:"+dir) // The original directory contained an uncompressed file, the copy after pushing and pulling doesn't (we use a different name for the compressed file). - _, err = os.Lstat(filepath.Join("fixtures", t.fixture, uncompresssedLayerFile)) - c.Assert(err, check.IsNil) + _, err = os.Lstat(filepath.Join("fixtures", c.fixture, uncompresssedLayerFile)) + require.NoError(t, err) _, err = os.Lstat(filepath.Join(dir, uncompresssedLayerFile)) - c.Assert(err, check.NotNil) - c.Assert(os.IsNotExist(err), check.Equals, true) + require.Error(t, err) + assert.True(t, os.IsNotExist(err)) // All pulled layers are smaller than the uncompressed size of uncompresssedLayerFile. (Note that this includes the manifest in s2, but that works out OK). 
dirf, err := os.Open(dir) - c.Assert(err, check.IsNil) + require.NoError(t, err) fis, err := dirf.Readdir(-1) - c.Assert(err, check.IsNil) + require.NoError(t, err) for _, fi := range fis { - c.Assert(fi.Size() < 2048, check.Equals, true) + assert.Less(t, fi.Size(), int64(2048)) } } } -func findRegularFiles(c *check.C, root string) []string { +func findRegularFiles(t *testing.T, root string) []string { result := []string{} err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { if err != nil { @@ -840,25 +877,26 @@ func findRegularFiles(c *check.C, root string) []string { } return nil }) - c.Assert(err, check.IsNil) + require.NoError(t, err) return result } // --sign-by and policy use for docker: with lookaside -func (s *CopySuite) TestCopyDockerLookaside(c *check.C) { +func (s *copySuite) TestCopyDockerLookaside() { + t := s.T() mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{}) - c.Assert(err, check.IsNil) + require.NoError(t, err) defer mech.Close() if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures - c.Skip(fmt.Sprintf("Signing not supported: %v", err)) + t.Skipf("Signing not supported: %v", err) } const ourRegistry = "docker://" + v2DockerRegistryURL + "/" - tmpDir := c.MkDir() + tmpDir := t.TempDir() copyDest := filepath.Join(tmpDir, "dest") err = os.Mkdir(copyDest, 0755) - c.Assert(err, check.IsNil) + require.NoError(t, err) dirDest := "dir:" + copyDest plainLookaside := filepath.Join(tmpDir, "lookaside") splitLookasideStaging := filepath.Join(tmpDir, "lookaside-staging") @@ -869,121 +907,123 @@ func (s *CopySuite) TestCopyDockerLookaside(c *check.C) { })) defer splitLookasideReadServer.Close() - policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome}) + policy := fileFromFixture(t, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome}) defer os.Remove(policy) registriesDir := filepath.Join(tmpDir, "registries.d") err = os.Mkdir(registriesDir, 0755) - c.Assert(err, check.IsNil) - registriesFile := fileFromFixture(c, "fixtures/registries.yaml", + require.NoError(t, err) + registriesFile := fileFromFixture(t, "fixtures/registries.yaml", map[string]string{"@lookaside@": plainLookaside, "@split-staging@": splitLookasideStaging, "@split-read@": splitLookasideReadServer.URL}) err = os.Symlink(registriesFile, filepath.Join(registriesDir, "registries.yaml")) - c.Assert(err, check.IsNil) + require.NoError(t, err) // Get an image to work with. Also verifies that we can use Docker repositories with no lookaside configured. - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, ourRegistry+"original/busybox") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, ourRegistry+"original/busybox") // Pulling an unsigned image fails. 
- assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*", + assertSkopeoFails(t, ".*Source image rejected: A signature was required, but no signature exists.*", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"original/busybox", dirDest) // Signing with lookaside defined succeeds, - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "personal@example.com", ourRegistry+"original/busybox", ourRegistry+"signed/busybox") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "personal@example.com", ourRegistry+"original/busybox", ourRegistry+"signed/busybox") // a signature file has been created, - foundFiles := findRegularFiles(c, plainLookaside) - c.Assert(foundFiles, check.HasLen, 1) + foundFiles := findRegularFiles(t, plainLookaside) + assert.Len(t, foundFiles, 1) // and pulling a signed image succeeds. - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"signed/busybox", dirDest) + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"signed/busybox", dirDest) // Deleting the image succeeds, - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "delete", ourRegistry+"signed/busybox") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--registries.d", registriesDir, "delete", ourRegistry+"signed/busybox") // and the signature file has been deleted (but we leave the directories around). - foundFiles = findRegularFiles(c, plainLookaside) - c.Assert(foundFiles, check.HasLen, 0) + foundFiles = findRegularFiles(t, plainLookaside) + assert.Len(t, foundFiles, 0) // Signing with a read/write lookaside split succeeds, - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "personal@example.com", ourRegistry+"original/busybox", ourRegistry+"public/busybox") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "personal@example.com", ourRegistry+"original/busybox", ourRegistry+"public/busybox") // and a signature file has been created. 
- foundFiles = findRegularFiles(c, splitLookasideStaging) - c.Assert(foundFiles, check.HasLen, 1) + foundFiles = findRegularFiles(t, splitLookasideStaging) + assert.Len(t, foundFiles, 1) // Pulling the image fails because the read lookaside URL has not been populated: - assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*", + assertSkopeoFails(t, ".*Source image rejected: A signature was required, but no signature exists.*", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest) // Pulling the image succeeds after the read lookaside URL is available: splitLookasideReadServerHandler = http.FileServer(http.Dir(splitLookasideStaging)) - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest) + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest) } // atomic: and docker: X-Registry-Supports-Signatures works and interoperates -func (s *CopySuite) TestCopyAtomicExtension(c *check.C) { +func (s *copySuite) TestCopyAtomicExtension() { + t := s.T() mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{}) - c.Assert(err, check.IsNil) + require.NoError(t, err) defer mech.Close() if err := mech.SupportsSigning(); err != nil { // FIXME? Test that the reading/writing works using signatures from fixtures - c.Skip(fmt.Sprintf("Signing not supported: %v", err)) + t.Skipf("Signing not supported: %v", err) } - topDir := c.MkDir() + topDir := t.TempDir() for _, subdir := range []string{"dirAA", "dirAD", "dirDA", "dirDD", "registries.d"} { err := os.MkdirAll(filepath.Join(topDir, subdir), 0755) - c.Assert(err, check.IsNil) + require.NoError(t, err) } registriesDir := filepath.Join(topDir, "registries.d") dirDest := "dir:" + topDir - policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome}) + policy := fileFromFixture(t, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome}) defer os.Remove(policy) // Get an image to work with to an atomic: destination. Also verifies that we can use Docker repositories without X-Registry-Supports-Signatures - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, "atomic:localhost:5000/myns/extension:unsigned") + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, "atomic:localhost:5000/myns/extension:unsigned") // Pulling an unsigned image using atomic: fails. 
- assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*", + assertSkopeoFails(t, ".*Source image rejected: A signature was required, but no signature exists.*", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5000/myns/extension:unsigned", dirDest+"/dirAA") // The same when pulling using docker: - assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*", + assertSkopeoFails(t, ".*Source image rejected: A signature was required, but no signature exists.*", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", "docker://localhost:5000/myns/extension:unsigned", dirDest+"/dirAD") // Sign the image using atomic: - assertSkopeoSucceeds(c, "", "--tls-verify=false", + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", "atomic:localhost:5000/myns/extension:unsigned", "atomic:localhost:5000/myns/extension:atomic") // Pulling the image using atomic: now succeeds. - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5000/myns/extension:atomic", dirDest+"/dirAA") // The same when pulling using docker: - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", "docker://localhost:5000/myns/extension:atomic", dirDest+"/dirAD") // Both access methods result in the same data. - assertDirImagesAreEqual(c, filepath.Join(topDir, "dirAA"), filepath.Join(topDir, "dirAD")) + assertDirImagesAreEqual(t, filepath.Join(topDir, "dirAA"), filepath.Join(topDir, "dirAD")) // Get another image (different so that they don't share signatures, and sign it using docker://) - assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, + assertSkopeoSucceeds(t, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "personal@example.com", testFQIN+":ppc64le", "docker://localhost:5000/myns/extension:extension") - c.Logf("%s", combinedOutputOfCommand(c, "oc", "get", "istag", "extension:extension", "-o", "json")) + t.Logf("%s", combinedOutputOfCommand(t, "oc", "get", "istag", "extension:extension", "-o", "json")) // Pulling the image using atomic: succeeds. - assertSkopeoSucceeds(c, "", "--debug", "--tls-verify=false", "--policy", policy, + assertSkopeoSucceeds(t, "", "--debug", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5000/myns/extension:extension", dirDest+"/dirDA") // The same when pulling using docker: - assertSkopeoSucceeds(c, "", "--debug", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, + assertSkopeoSucceeds(t, "", "--debug", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", "docker://localhost:5000/myns/extension:extension", dirDest+"/dirDD") // Both access methods result in the same data. 
- assertDirImagesAreEqual(c, filepath.Join(topDir, "dirDA"), filepath.Join(topDir, "dirDD")) + assertDirImagesAreEqual(t, filepath.Join(topDir, "dirDA"), filepath.Join(topDir, "dirDD")) } // Both mirroring support in registries.conf, and mirrored remapIdentity support in policy.json -func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) { +func (s *copySuite) TestCopyVerifyingMirroredSignatures() { + t := s.T() const regPrefix = "docker://localhost:5006/myns/mirroring-" mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{}) - c.Assert(err, check.IsNil) + require.NoError(t, err) defer mech.Close() if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures - c.Skip(fmt.Sprintf("Signing not supported: %v", err)) + t.Skipf("Signing not supported: %v", err) } - topDir := c.MkDir() + topDir := t.TempDir() registriesDir := filepath.Join(topDir, "registries.d") // An empty directory to disable lookaside use dirDest := "dir:" + filepath.Join(topDir, "unused-dest") - policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome}) + policy := fileFromFixture(t, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome}) defer os.Remove(policy) // We use X-R-S-S for this testing to avoid having to deal with the lookasides. @@ -992,120 +1032,127 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) { // So, make sure to never create a signature that could be considered valid in a different part of the test (i.e. don't reuse tags). // Get an image to work with. - assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", testFQIN, regPrefix+"primary:unsigned") + assertSkopeoSucceeds(t, "", "copy", "--dest-tls-verify=false", testFQIN, regPrefix+"primary:unsigned") // Verify that unsigned images are rejected - assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*", + assertSkopeoFails(t, ".*Source image rejected: A signature was required, but no signature exists.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:unsigned", dirDest) // Sign the image for the primary location - assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by", "personal@example.com", regPrefix+"primary:unsigned", regPrefix+"primary:direct") + assertSkopeoSucceeds(t, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by", "personal@example.com", regPrefix+"primary:unsigned", regPrefix+"primary:direct") // Verify that a correctly signed image in the primary location is usable. 
- assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:direct", dirDest) + assertSkopeoSucceeds(t, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:direct", dirDest) // Sign the image for the mirror - assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by", "personal@example.com", regPrefix+"primary:unsigned", regPrefix+"mirror:mirror-signed") + assertSkopeoSucceeds(t, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by", "personal@example.com", regPrefix+"primary:unsigned", regPrefix+"mirror:mirror-signed") // Verify that a correctly signed image for the mirror is accessible using the mirror's reference - assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"mirror:mirror-signed", dirDest) + assertSkopeoSucceeds(t, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"mirror:mirror-signed", dirDest) // … but verify that while it is accessible using the primary location redirecting to the mirror, … - assertSkopeoSucceeds(c, "" /* no --policy */, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:mirror-signed", dirDest) + assertSkopeoSucceeds(t, "" /* no --policy */, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:mirror-signed", dirDest) // … verify it is NOT accessible when requiring a signature. - assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted.*", + assertSkopeoFails(t, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:mirror-signed", dirDest) // Fail if we specify an unqualified identity - assertSkopeoFails(c, ".*Could not parse --sign-identity: repository name must be canonical.*", + assertSkopeoFails(t, ".*Could not parse --sign-identity: repository name must be canonical.*", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=this-is-not-fully-specified", regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed") // Create a signature for mirroring-primary:primary-signed without pushing there. 
- assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=localhost:5006/myns/mirroring-primary:primary-signed", regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed") + assertSkopeoSucceeds(t, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=localhost:5006/myns/mirroring-primary:primary-signed", regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed") // Verify that a correctly signed image for the primary is accessible using the primary's reference - assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:primary-signed", dirDest) + assertSkopeoSucceeds(t, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:primary-signed", dirDest) // … but verify that while it is accessible using the mirror location - assertSkopeoSucceeds(c, "" /* no --policy */, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"mirror:primary-signed", dirDest) + assertSkopeoSucceeds(t, "" /* no --policy */, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"mirror:primary-signed", dirDest) // … verify it is NOT accessible when requiring a signature. - assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted; Signature for identity localhost:5006/myns/mirroring-primary:primary-signed is not accepted.*", + assertSkopeoFails(t, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted; Signature for identity localhost:5006/myns/mirroring-primary:primary-signed is not accepted.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"mirror:primary-signed", dirDest) - assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", "--dest-tls-verify=false", regPrefix+"primary:unsigned", regPrefix+"remap:remapped") + assertSkopeoSucceeds(t, "", "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", "--dest-tls-verify=false", regPrefix+"primary:unsigned", regPrefix+"remap:remapped") // Verify that while a remapIdentity image is accessible using the remapped (mirror) location - assertSkopeoSucceeds(c, "" /* no --policy */, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest) + assertSkopeoSucceeds(t, "" /* no --policy */, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest) // … it is NOT accessible when requiring a signature … - assertSkopeoFails(c, ".*Source 
image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted; Signature for identity localhost:5006/myns/mirroring-primary:primary-signed is not accepted.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest) + assertSkopeoFails(t, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted; Signature for identity localhost:5006/myns/mirroring-primary:primary-signed is not accepted.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest) // … until signed. - assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=localhost:5006/myns/mirroring-primary:remapped", regPrefix+"remap:remapped", regPrefix+"remap:remapped") - assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest) + assertSkopeoSucceeds(t, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=localhost:5006/myns/mirroring-primary:remapped", regPrefix+"remap:remapped", regPrefix+"remap:remapped") + assertSkopeoSucceeds(t, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest) // To be extra clear about the semantics, verify that the signedPrefix (primary) location never exists // and only the remapped prefix (mirror) is accessed. 
- assertSkopeoFails(c, ".*initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown.*", + assertSkopeoFails(t, ".*initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:remapped", dirDest) } -func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) { - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url)) - dir1 := c.MkDir() - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "dir:"+dir1) +func (s *skopeoSuite) TestCopySrcWithAuth() { + t := s.T() + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url)) + dir1 := t.TempDir() + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "dir:"+dir1) } -func (s *SkopeoSuite) TestCopyDestWithAuth(c *check.C) { - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url)) +func (s *skopeoSuite) TestCopyDestWithAuth() { + t := s.T() + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url)) } -func (s *SkopeoSuite) TestCopySrcAndDestWithAuth(c *check.C) { - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url)) - assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", "--dest-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), fmt.Sprintf("docker://%s/test:auth", s.regV2WithAuth.url)) +func (s *skopeoSuite) TestCopySrcAndDestWithAuth() { + t := s.T() + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url)) + assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", "--dest-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), fmt.Sprintf("docker://%s/test:auth", s.regV2WithAuth.url)) } -func (s *CopySuite) TestCopyNoPanicOnHTTPResponseWithoutTLSVerifyFalse(c *check.C) { - topDir := c.MkDir() +func (s *copySuite) TestCopyNoPanicOnHTTPResponseWithoutTLSVerifyFalse() { + t := s.T() + topDir := t.TempDir() const ourRegistry = "docker://" + v2DockerRegistryURL + "/" - assertSkopeoFails(c, ".*server gave HTTP response to HTTPS client.*", + assertSkopeoFails(t, ".*server gave HTTP response to HTTPS client.*", "copy", ourRegistry+"foobar", "dir:"+topDir) } -func (s *CopySuite) TestCopySchemaConversion(c *check.C) { +func (s *copySuite) TestCopySchemaConversion() { + t := s.T() // Test conversion / schema autodetection both for the OpenShift embedded registry… - s.testCopySchemaConversionRegistries(c, "docker://localhost:5005/myns/schema1", "docker://localhost:5006/myns/schema2") + 
s.testCopySchemaConversionRegistries(t, "docker://localhost:5005/myns/schema1", "docker://localhost:5006/myns/schema2") // … and for various docker/distribution registry versions. - s.testCopySchemaConversionRegistries(c, "docker://"+v2s1DockerRegistryURL+"/schema1", "docker://"+v2DockerRegistryURL+"/schema2") + s.testCopySchemaConversionRegistries(t, "docker://"+v2s1DockerRegistryURL+"/schema1", "docker://"+v2DockerRegistryURL+"/schema2") } -func (s *CopySuite) TestCopyManifestConversion(c *check.C) { - topDir := c.MkDir() +func (s *copySuite) TestCopyManifestConversion() { + t := s.T() + topDir := t.TempDir() srcDir := filepath.Join(topDir, "source") destDir1 := filepath.Join(topDir, "dest1") destDir2 := filepath.Join(topDir, "dest2") // oci to v2s1 and vice-versa not supported yet // get v2s2 manifest type - assertSkopeoSucceeds(c, "", "copy", testFQIN, "dir:"+srcDir) - verifyManifestMIMEType(c, srcDir, manifest.DockerV2Schema2MediaType) + assertSkopeoSucceeds(t, "", "copy", testFQIN, "dir:"+srcDir) + verifyManifestMIMEType(t, srcDir, manifest.DockerV2Schema2MediaType) // convert from v2s2 to oci - assertSkopeoSucceeds(c, "", "copy", "--format=oci", "dir:"+srcDir, "dir:"+destDir1) - verifyManifestMIMEType(c, destDir1, imgspecv1.MediaTypeImageManifest) + assertSkopeoSucceeds(t, "", "copy", "--format=oci", "dir:"+srcDir, "dir:"+destDir1) + verifyManifestMIMEType(t, destDir1, imgspecv1.MediaTypeImageManifest) // convert from oci to v2s2 - assertSkopeoSucceeds(c, "", "copy", "--format=v2s2", "dir:"+destDir1, "dir:"+destDir2) - verifyManifestMIMEType(c, destDir2, manifest.DockerV2Schema2MediaType) + assertSkopeoSucceeds(t, "", "copy", "--format=v2s2", "dir:"+destDir1, "dir:"+destDir2) + verifyManifestMIMEType(t, destDir2, manifest.DockerV2Schema2MediaType) // convert from v2s2 to v2s1 - assertSkopeoSucceeds(c, "", "copy", "--format=v2s1", "dir:"+srcDir, "dir:"+destDir1) - verifyManifestMIMEType(c, destDir1, manifest.DockerV2Schema1SignedMediaType) + assertSkopeoSucceeds(t, "", "copy", "--format=v2s1", "dir:"+srcDir, "dir:"+destDir1) + verifyManifestMIMEType(t, destDir1, manifest.DockerV2Schema1SignedMediaType) // convert from v2s1 to v2s2 - assertSkopeoSucceeds(c, "", "copy", "--format=v2s2", "dir:"+destDir1, "dir:"+destDir2) - verifyManifestMIMEType(c, destDir2, manifest.DockerV2Schema2MediaType) + assertSkopeoSucceeds(t, "", "copy", "--format=v2s2", "dir:"+destDir1, "dir:"+destDir2) + verifyManifestMIMEType(t, destDir2, manifest.DockerV2Schema2MediaType) } -func (s *CopySuite) TestCopyPreserveDigests(c *check.C) { - topDir := c.MkDir() +func (s *copySuite) TestCopyPreserveDigests() { + t := s.T() + topDir := t.TempDir() - assertSkopeoSucceeds(c, "", "copy", knownListImage, "--multi-arch=all", "--preserve-digests", "dir:"+topDir) - assertSkopeoFails(c, ".*Instructed to preserve digests.*", "copy", knownListImage, "--multi-arch=all", "--preserve-digests", "--format=oci", "dir:"+topDir) + assertSkopeoSucceeds(t, "", "copy", knownListImage, "--multi-arch=all", "--preserve-digests", "dir:"+topDir) + assertSkopeoFails(t, ".*Instructed to preserve digests.*", "copy", knownListImage, "--multi-arch=all", "--preserve-digests", "--format=oci", "dir:"+topDir) } -func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Registry, schema2Registry string) { - topDir := c.MkDir() +func (s *copySuite) testCopySchemaConversionRegistries(t *testing.T, schema1Registry, schema2Registry string) { + topDir := t.TempDir() for _, subdir := range []string{"input1", "input2", "dest2"} { err := 
os.MkdirAll(filepath.Join(topDir, subdir), 0755) - c.Assert(err, check.IsNil) + require.NoError(t, err) } input1Dir := filepath.Join(topDir, "input1") input2Dir := filepath.Join(topDir, "input2") @@ -1113,60 +1160,65 @@ func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Regist // Ensure we are working with a schema2 image. // dir: accepts any manifest format, i.e. this makes …/input2 a schema2 source which cannot be asked to produce schema1 like ordinary docker: registries can. - assertSkopeoSucceeds(c, "", "copy", testFQIN, "dir:"+input2Dir) - verifyManifestMIMEType(c, input2Dir, manifest.DockerV2Schema2MediaType) + assertSkopeoSucceeds(t, "", "copy", testFQIN, "dir:"+input2Dir) + verifyManifestMIMEType(t, input2Dir, manifest.DockerV2Schema2MediaType) // 2→2 (the "f2t2" in tag means "from 2 to 2") - assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input2Dir, schema2Registry+":f2t2") - assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema2Registry+":f2t2", "dir:"+destDir) - verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema2MediaType) + assertSkopeoSucceeds(t, "", "copy", "--dest-tls-verify=false", "dir:"+input2Dir, schema2Registry+":f2t2") + assertSkopeoSucceeds(t, "", "copy", "--src-tls-verify=false", schema2Registry+":f2t2", "dir:"+destDir) + verifyManifestMIMEType(t, destDir, manifest.DockerV2Schema2MediaType) // 2→1; we will use the result as a schema1 image for further tests. - assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input2Dir, schema1Registry+":f2t1") - assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema1Registry+":f2t1", "dir:"+input1Dir) - verifyManifestMIMEType(c, input1Dir, manifest.DockerV2Schema1SignedMediaType) + assertSkopeoSucceeds(t, "", "copy", "--dest-tls-verify=false", "dir:"+input2Dir, schema1Registry+":f2t1") + assertSkopeoSucceeds(t, "", "copy", "--src-tls-verify=false", schema1Registry+":f2t1", "dir:"+input1Dir) + verifyManifestMIMEType(t, input1Dir, manifest.DockerV2Schema1SignedMediaType) // 1→1 - assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input1Dir, schema1Registry+":f1t1") - assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema1Registry+":f1t1", "dir:"+destDir) - verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema1SignedMediaType) + assertSkopeoSucceeds(t, "", "copy", "--dest-tls-verify=false", "dir:"+input1Dir, schema1Registry+":f1t1") + assertSkopeoSucceeds(t, "", "copy", "--src-tls-verify=false", schema1Registry+":f1t1", "dir:"+destDir) + verifyManifestMIMEType(t, destDir, manifest.DockerV2Schema1SignedMediaType) // 1→2: image stays unmodified schema1 - assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input1Dir, schema2Registry+":f1t2") - assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema2Registry+":f1t2", "dir:"+destDir) - verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema1SignedMediaType) + assertSkopeoSucceeds(t, "", "copy", "--dest-tls-verify=false", "dir:"+input1Dir, schema2Registry+":f1t2") + assertSkopeoSucceeds(t, "", "copy", "--src-tls-verify=false", schema2Registry+":f1t2", "dir:"+destDir) + verifyManifestMIMEType(t, destDir, manifest.DockerV2Schema1SignedMediaType) } const regConfFixture = "./fixtures/registries.conf" -func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) { - dir := c.MkDir() +func (s *skopeoSuite) TestSuccessCopySrcWithMirror() { + t := s.T() + dir := t.TempDir() - assertSkopeoSucceeds(c, "", 
"--registries-conf="+regConfFixture, "copy", + assertSkopeoSucceeds(t, "", "--registries-conf="+regConfFixture, "copy", "docker://mirror.invalid/busybox", "dir:"+dir) } -func (s *SkopeoSuite) TestFailureCopySrcWithMirrorsUnavailable(c *check.C) { - dir := c.MkDir() +func (s *skopeoSuite) TestFailureCopySrcWithMirrorsUnavailable() { + t := s.T() + dir := t.TempDir() // .invalid domains are, per RFC 6761, supposed to result in NXDOMAIN. // With systemd-resolved (used only via NSS?), we instead seem to get “Temporary failure in name resolution” - assertSkopeoFails(c, ".*(no such host|Temporary failure in name resolution).*", + assertSkopeoFails(t, ".*(no such host|Temporary failure in name resolution).*", "--registries-conf="+regConfFixture, "copy", "docker://invalid.invalid/busybox", "dir:"+dir) } -func (s *SkopeoSuite) TestSuccessCopySrcWithMirrorAndPrefix(c *check.C) { - dir := c.MkDir() +func (s *skopeoSuite) TestSuccessCopySrcWithMirrorAndPrefix() { + t := s.T() + dir := t.TempDir() - assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy", + assertSkopeoSucceeds(t, "", "--registries-conf="+regConfFixture, "copy", "docker://gcr.invalid/foo/bar/busybox", "dir:"+dir) } -func (s *SkopeoSuite) TestFailureCopySrcWithMirrorAndPrefixUnavailable(c *check.C) { - dir := c.MkDir() +func (s *skopeoSuite) TestFailureCopySrcWithMirrorAndPrefixUnavailable() { + t := s.T() + dir := t.TempDir() // .invalid domains are, per RFC 6761, supposed to result in NXDOMAIN. // With systemd-resolved (used only via NSS?), we instead seem to get “Temporary failure in name resolution” - assertSkopeoFails(c, ".*(no such host|Temporary failure in name resolution).*", + assertSkopeoFails(t, ".*(no such host|Temporary failure in name resolution).*", "--registries-conf="+regConfFixture, "copy", "docker://gcr.invalid/wrong/prefix/busybox", "dir:"+dir) } -func (s *CopySuite) TestCopyFailsWhenReferenceIsInvalid(c *check.C) { - assertSkopeoFails(c, `.*Invalid image name.*`, "copy", "unknown:transport", "unknown:test") +func (s *copySuite) TestCopyFailsWhenReferenceIsInvalid() { + t := s.T() + assertSkopeoFails(t, `.*Invalid image name.*`, "copy", "unknown:transport", "unknown:test") } diff --git a/integration/openshift.go b/integration/openshift.go index c822c56010..267b734cd3 100644 --- a/integration/openshift.go +++ b/integration/openshift.go @@ -9,10 +9,11 @@ import ( "os/exec" "path/filepath" "strings" + "testing" "time" "github.com/containers/storage/pkg/homedir" - "gopkg.in/check.v1" + "github.com/stretchr/testify/require" ) var adminKUBECONFIG = map[string]string{ @@ -30,16 +31,16 @@ type openshiftCluster struct { // startOpenshiftCluster creates a new openshiftCluster. // WARNING: This affects state in users' home directory! Only run // in isolated test environment. 
-func startOpenshiftCluster(c *check.C) *openshiftCluster { +func startOpenshiftCluster(t *testing.T) *openshiftCluster { 	cluster := &openshiftCluster{} -	cluster.workingDir = c.MkDir() +	cluster.workingDir = t.TempDir()  -	cluster.startMaster(c) -	cluster.prepareRegistryConfig(c) -	cluster.startRegistry(c) -	cluster.ocLoginToProject(c) -	cluster.dockerLogin(c) -	cluster.relaxImageSignerPermissions(c) +	cluster.startMaster(t) +	cluster.prepareRegistryConfig(t) +	cluster.startRegistry(t) +	cluster.ocLoginToProject(t) +	cluster.dockerLogin(t) +	cluster.relaxImageSignerPermissions(t)  	return cluster }  @@ -56,21 +57,21 @@ func (cluster *openshiftCluster) clusterCmd(env map[string]string, name string, }  // startMaster starts the OpenShift master (etcd+API server) and waits for it to be ready, or terminates on failure. -func (cluster *openshiftCluster) startMaster(c *check.C) { +func (cluster *openshiftCluster) startMaster(t *testing.T) { 	cmd := cluster.clusterCmd(nil, "openshift", "start", "master") 	cluster.processes = append(cluster.processes, cmd) 	stdout, err := cmd.StdoutPipe() -	c.Assert(err, check.IsNil) +	require.NoError(t, err) 	// Send both to the same pipe. This might cause the two streams to be mixed up, 	// but logging actually goes only to stderr - this primarily ensure we log any 	// unexpected output to stdout. 	cmd.Stderr = cmd.Stdout 	err = cmd.Start() -	c.Assert(err, check.IsNil) +	require.NoError(t, err)  -	portOpen, terminatePortCheck := newPortChecker(c, 8443) +	portOpen, terminatePortCheck := newPortChecker(t, 8443) 	defer func() { -		c.Logf("Terminating port check") +		t.Logf("Terminating port check") 		terminatePortCheck <- true 	}()  @@ -78,12 +78,12 @@ func (cluster *openshiftCluster) startMaster(c *check.C) { 	logCheckFound := make(chan bool) 	go func() { 		defer func() { -			c.Logf("Log checker exiting") +			t.Logf("Log checker exiting") 		}() 		scanner := bufio.NewScanner(stdout) 		for scanner.Scan() { 			line := scanner.Text() -			c.Logf("Log line: %s", line) +			t.Logf("Log line: %s", line) 			if strings.Contains(line, "Started Origin Controllers") { 				logCheckFound <- true 				return @@ -92,7 +92,7 @@ func (cluster *openshiftCluster) startMaster(c *check.C) { 			// Note: we can block before we get here. 			select { 			case <-terminateLogCheck: -				c.Logf("terminated") +				t.Logf("terminated") 				return 			default: 				// Do not block here and read the next line. 
@@ -101,7 +101,7 @@ func (cluster *openshiftCluster) startMaster(c *check.C) { logCheckFound <- false }() defer func() { - c.Logf("Terminating log check") + t.Logf("Terminating log check") terminateLogCheck <- true }() @@ -110,26 +110,26 @@ func (cluster *openshiftCluster) startMaster(c *check.C) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) defer cancel() for !gotPortCheck || !gotLogCheck { - c.Logf("Waiting for master") + t.Logf("Waiting for master") select { case <-portOpen: - c.Logf("port check done") + t.Logf("port check done") gotPortCheck = true case found := <-logCheckFound: - c.Logf("log check done, found: %t", found) + t.Logf("log check done, found: %t", found) if !found { - c.Fatal("log check done, success message not found") + t.Fatal("log check done, success message not found") } gotLogCheck = true case <-ctx.Done(): - c.Fatalf("Timed out waiting for master: %v", ctx.Err()) + t.Fatalf("Timed out waiting for master: %v", ctx.Err()) } } - c.Logf("OK, master started!") + t.Logf("OK, master started!") } // prepareRegistryConfig creates a registry service account and a related k8s client configuration in ${cluster.workingDir}/openshift.local.registry. -func (cluster *openshiftCluster) prepareRegistryConfig(c *check.C) { +func (cluster *openshiftCluster) prepareRegistryConfig(t *testing.T) { // This partially mimics the objects created by (oadm registry), except that we run the // server directly as an ordinary process instead of a pod with an implicitly attached service account. saJSON := `{ @@ -140,93 +140,93 @@ func (cluster *openshiftCluster) prepareRegistryConfig(c *check.C) { } }` cmd := cluster.clusterCmd(adminKUBECONFIG, "oc", "create", "-f", "-") - runExecCmdWithInput(c, cmd, saJSON) + runExecCmdWithInput(t, cmd, saJSON) cmd = cluster.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-user", "system:registry", "-z", "registry") out, err := cmd.CombinedOutput() - c.Assert(err, check.IsNil, check.Commentf("%s", string(out))) - c.Assert(string(out), check.Equals, "cluster role \"system:registry\" added: \"registry\"\n") + require.NoError(t, err, "%s", string(out)) + require.Equal(t, "cluster role \"system:registry\" added: \"registry\"\n", string(out)) cmd = cluster.clusterCmd(adminKUBECONFIG, "oadm", "create-api-client-config", "--client-dir=openshift.local.registry", "--basename=openshift-registry", "--user=system:serviceaccount:default:registry") out, err = cmd.CombinedOutput() - c.Assert(err, check.IsNil, check.Commentf("%s", string(out))) - c.Assert(string(out), check.Equals, "") + require.NoError(t, err, "%s", string(out)) + require.Equal(t, "", string(out)) } // startRegistry starts the OpenShift registry with configPart on port, waits for it to be ready, and returns the process object, or terminates on failure. 
-func (cluster *openshiftCluster) startRegistryProcess(c *check.C, port uint16, configPath string) *exec.Cmd { +func (cluster *openshiftCluster) startRegistryProcess(t *testing.T, port uint16, configPath string) *exec.Cmd { cmd := cluster.clusterCmd(map[string]string{ "KUBECONFIG": "openshift.local.registry/openshift-registry.kubeconfig", "DOCKER_REGISTRY_URL": fmt.Sprintf("127.0.0.1:%d", port), }, "dockerregistry", configPath) - consumeAndLogOutputs(c, fmt.Sprintf("registry-%d", port), cmd) + consumeAndLogOutputs(t, fmt.Sprintf("registry-%d", port), cmd) err := cmd.Start() - c.Assert(err, check.IsNil) + require.NoError(t, err) - portOpen, terminatePortCheck := newPortChecker(c, port) + portOpen, terminatePortCheck := newPortChecker(t, port) defer func() { terminatePortCheck <- true }() - c.Logf("Waiting for registry to start") + t.Logf("Waiting for registry to start") ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() select { case <-portOpen: - c.Logf("OK, Registry port open") + t.Logf("OK, Registry port open") case <-ctx.Done(): - c.Fatalf("Timed out waiting for registry to start: %v", ctx.Err()) + t.Fatalf("Timed out waiting for registry to start: %v", ctx.Err()) } return cmd } // startRegistry starts the OpenShift registry and waits for it to be ready, or terminates on failure. -func (cluster *openshiftCluster) startRegistry(c *check.C) { +func (cluster *openshiftCluster) startRegistry(t *testing.T) { // Our “primary” registry - cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5000, "/atomic-registry-config.yml")) + cluster.processes = append(cluster.processes, cluster.startRegistryProcess(t, 5000, "/atomic-registry-config.yml")) // A registry configured with acceptschema2:false - schema1Config := fileFromFixture(c, "/atomic-registry-config.yml", map[string]string{ + schema1Config := fileFromFixture(t, "/atomic-registry-config.yml", map[string]string{ "addr: :5000": "addr: :5005", "rootdirectory: /registry": "rootdirectory: /registry-schema1", // The default configuration currently already contains acceptschema2: false }) // Make sure the configuration contains "acceptschema2: false", because eventually it will be enabled upstream and this function will need to be updated. configContents, err := os.ReadFile(schema1Config) - c.Assert(err, check.IsNil) - c.Assert(string(configContents), check.Matches, "(?s).*acceptschema2: false.*") - cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5005, schema1Config)) + require.NoError(t, err) + require.Regexp(t, "(?s).*acceptschema2: false.*", string(configContents)) + cluster.processes = append(cluster.processes, cluster.startRegistryProcess(t, 5005, schema1Config)) // A registry configured with acceptschema2:true - schema2Config := fileFromFixture(c, "/atomic-registry-config.yml", map[string]string{ + schema2Config := fileFromFixture(t, "/atomic-registry-config.yml", map[string]string{ "addr: :5000": "addr: :5006", "rootdirectory: /registry": "rootdirectory: /registry-schema2", "acceptschema2: false": "acceptschema2: true", }) - cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5006, schema2Config)) + cluster.processes = append(cluster.processes, cluster.startRegistryProcess(t, 5006, schema2Config)) } // ocLogin runs (oc login) and (oc new-project) on the cluster, or terminates on failure. 
-func (cluster *openshiftCluster) ocLoginToProject(c *check.C) { - c.Logf("oc login") +func (cluster *openshiftCluster) ocLoginToProject(t *testing.T) { + t.Logf("oc login") cmd := cluster.clusterCmd(nil, "oc", "login", "--certificate-authority=openshift.local.config/master/ca.crt", "-u", "myuser", "-p", "mypw", "https://localhost:8443") out, err := cmd.CombinedOutput() - c.Assert(err, check.IsNil, check.Commentf("%s", out)) - c.Assert(string(out), check.Matches, "(?s).*Login successful.*") // (?s) : '.' will also match newlines + require.NoError(t, err, "%s", out) + require.Regexp(t, "(?s).*Login successful.*", string(out)) // (?s) : '.' will also match newlines - outString := combinedOutputOfCommand(c, "oc", "new-project", "myns") - c.Assert(outString, check.Matches, `(?s).*Now using project "myns".*`) // (?s) : '.' will also match newlines + outString := combinedOutputOfCommand(t, "oc", "new-project", "myns") + require.Regexp(t, `(?s).*Now using project "myns".*`, outString) // (?s) : '.' will also match newlines } // dockerLogin simulates (docker login) to the cluster, or terminates on failure. // We do not run (docker login) directly, because that requires a running daemon and a docker package. -func (cluster *openshiftCluster) dockerLogin(c *check.C) { +func (cluster *openshiftCluster) dockerLogin(t *testing.T) { cluster.dockerDir = filepath.Join(homedir.Get(), ".docker") err := os.Mkdir(cluster.dockerDir, 0700) - c.Assert(err, check.IsNil) + require.NoError(t, err) - out := combinedOutputOfCommand(c, "oc", "config", "view", "-o", "json", "-o", "jsonpath={.users[*].user.token}") - c.Logf("oc config value: %s", out) + out := combinedOutputOfCommand(t, "oc", "config", "view", "-o", "json", "-o", "jsonpath={.users[*].user.token}") + t.Logf("oc config value: %s", out) authValue := base64.StdEncoding.EncodeToString([]byte("unused:" + out)) auths := []string{} for _, port := range []int{5000, 5005, 5006} { @@ -237,22 +237,22 @@ func (cluster *openshiftCluster) dockerLogin(c *check.C) { } configJSON := `{"auths": {` + strings.Join(auths, ",") + `}}` err = os.WriteFile(filepath.Join(cluster.dockerDir, "config.json"), []byte(configJSON), 0600) - c.Assert(err, check.IsNil) + require.NoError(t, err) } // relaxImageSignerPermissions opens up the system:image-signer permissions so that // anyone can work with signatures // FIXME: This also allows anyone to DoS anyone else; this design is really not all // that workable, but it is the best we can do for now. -func (cluster *openshiftCluster) relaxImageSignerPermissions(c *check.C) { +func (cluster *openshiftCluster) relaxImageSignerPermissions(t *testing.T) { cmd := cluster.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-group", "system:image-signer", "system:authenticated") out, err := cmd.CombinedOutput() - c.Assert(err, check.IsNil, check.Commentf("%s", string(out))) - c.Assert(string(out), check.Equals, "cluster role \"system:image-signer\" added: \"system:authenticated\"\n") + require.NoError(t, err, "%s", string(out)) + require.Equal(t, "cluster role \"system:image-signer\" added: \"system:authenticated\"\n", string(out)) } // tearDown stops the cluster services and deletes (only some!) of the state. -func (cluster *openshiftCluster) tearDown(c *check.C) { +func (cluster *openshiftCluster) tearDown(t *testing.T) { for i := len(cluster.processes) - 1; i >= 0; i-- { // It’s undocumented what Kill() returns if the process has terminated, // so we couldn’t check just for that. 
This is running in a container anyway… @@ -260,6 +260,6 @@ func (cluster *openshiftCluster) tearDown(c *check.C) { } if cluster.dockerDir != "" { err := os.RemoveAll(cluster.dockerDir) - c.Assert(err, check.IsNil) + require.NoError(t, err) } } diff --git a/integration/openshift_shell_test.go b/integration/openshift_shell_test.go index 3e43ab0233..e44b189d6f 100644 --- a/integration/openshift_shell_test.go +++ b/integration/openshift_shell_test.go @@ -7,7 +7,8 @@ import ( "os" "os/exec" - "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) /* @@ -20,7 +21,7 @@ To use it, run: to start a container, then within the container: - SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell' + SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -run='copySuite.TestRunShell' An example of what can be done within the container: @@ -33,13 +34,14 @@ An example of what can be done within the container: curl -L -v 'http://localhost:5000/v2/myns/personal/manifests/personal' --header 'Authorization: Bearer $token_from_oauth' curl -L -v 'http://localhost:5000/extensions/v2/myns/personal/signatures/$manifest_digest' --header 'Authorization: Bearer $token_from_oauth' */ -func (s *CopySuite) TestRunShell(c *check.C) { +func (s *copySuite) TestRunShell() { + t := s.T() cmd := exec.Command("bash", "-i") tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0) - c.Assert(err, check.IsNil) + require.NoError(t, err) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty err = cmd.Run() - c.Assert(err, check.IsNil) + assert.NoError(t, err) } diff --git a/integration/proxy_test.go b/integration/proxy_test.go index 7fd794b4af..8e1bc04a10 100644 --- a/integration/proxy_test.go +++ b/integration/proxy_test.go @@ -9,11 +9,14 @@ import ( "os/exec" "strings" "syscall" + "testing" "time" "github.com/containers/image/v5/manifest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" ) // This image is known to be x86_64 only right now @@ -213,17 +216,12 @@ func newProxy() (*proxy, error) { return p, nil } -func init() { - check.Suite(&ProxySuite{}) +func TestProxy(t *testing.T) { + suite.Run(t, &proxySuite{}) } -type ProxySuite struct { -} - -func (s *ProxySuite) SetUpSuite(c *check.C) { -} - -func (s *ProxySuite) TearDownSuite(c *check.C) { +type proxySuite struct { + suite.Suite } type byteFetch struct { @@ -334,25 +332,26 @@ func runTestOpenImageOptionalNotFound(p *proxy, img string) error { return nil } -func (s *ProxySuite) TestProxy(c *check.C) { +func (s *proxySuite) TestProxy() { + t := s.T() p, err := newProxy() - c.Assert(err, check.IsNil) + require.NoError(t, err) err = runTestGetManifestAndConfig(p, knownNotManifestListedImageX8664) if err != nil { err = fmt.Errorf("Testing image %s: %v", knownNotManifestListedImageX8664, err) } - c.Assert(err, check.IsNil) + assert.NoError(t, err) err = runTestGetManifestAndConfig(p, knownListImage) if err != nil { err = fmt.Errorf("Testing image %s: %v", knownListImage, err) } - c.Assert(err, check.IsNil) + assert.NoError(t, err) err = runTestOpenImageOptionalNotFound(p, knownNotExtantImage) if err != nil { err = fmt.Errorf("Testing optional image %s: %v", knownNotExtantImage, err) } - c.Assert(err, check.IsNil) + assert.NoError(t, err) } diff --git 
a/integration/registry.go b/integration/registry.go index dba34d7a6b..8ea5c5873a 100644 --- a/integration/registry.go +++ b/integration/registry.go @@ -6,9 +6,10 @@ import ( "os" "os/exec" "path/filepath" + "testing" "time" - "gopkg.in/check.v1" + "github.com/stretchr/testify/require" ) const ( @@ -24,9 +25,9 @@ type testRegistryV2 struct { email string } -func setupRegistryV2At(c *check.C, url string, auth, schema1 bool) *testRegistryV2 { - reg, err := newTestRegistryV2At(c, url, auth, schema1) - c.Assert(err, check.IsNil) +func setupRegistryV2At(t *testing.T, url string, auth, schema1 bool) *testRegistryV2 { + reg, err := newTestRegistryV2At(t, url, auth, schema1) + require.NoError(t, err) // Wait for registry to be ready to serve requests. for i := 0; i != 50; i++ { @@ -37,13 +38,13 @@ func setupRegistryV2At(c *check.C, url string, auth, schema1 bool) *testRegistry } if err != nil { - c.Fatal("Timeout waiting for test registry to become available") + t.Fatal("Timeout waiting for test registry to become available") } return reg } -func newTestRegistryV2At(c *check.C, url string, auth, schema1 bool) (*testRegistryV2, error) { - tmp := c.MkDir() +func newTestRegistryV2At(t *testing.T, url string, auth, schema1 bool) (*testRegistryV2, error) { + tmp := t.TempDir() template := `version: 0.1 loglevel: debug storage: @@ -94,10 +95,10 @@ compatibility: cmd = exec.Command(binaryV2, "serve", confPath) } - consumeAndLogOutputs(c, fmt.Sprintf("registry-%s", url), cmd) + consumeAndLogOutputs(t, fmt.Sprintf("registry-%s", url), cmd) if err := cmd.Start(); err != nil { if os.IsNotExist(err) { - c.Skip(err.Error()) + t.Skip(err.Error()) } return nil, err } @@ -110,9 +111,9 @@ compatibility: }, nil } -func (t *testRegistryV2) Ping() error { +func (r *testRegistryV2) Ping() error { // We always ping through HTTP for our test registry. - resp, err := http.Get(fmt.Sprintf("http://%s/v2/", t.url)) + resp, err := http.Get(fmt.Sprintf("http://%s/v2/", r.url)) if err != nil { return err } @@ -123,8 +124,8 @@ func (t *testRegistryV2) Ping() error { return nil } -func (t *testRegistryV2) tearDown(c *check.C) { +func (r *testRegistryV2) tearDown(t *testing.T) { // It’s undocumented what Kill() returns if the process has terminated, // so we couldn’t check just for that. 
This is running in a container anyway… - _ = t.cmd.Process.Kill() + _ = r.cmd.Process.Kill() } diff --git a/integration/signing_test.go b/integration/signing_test.go index 56142cab1e..99742d90b0 100644 --- a/integration/signing_test.go +++ b/integration/signing_test.go @@ -6,23 +6,28 @@ import ( "os" "os/exec" "strings" + "testing" "github.com/containers/image/v5/signature" - "gopkg.in/check.v1" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" ) const ( gpgBinary = "gpg" ) -func init() { - check.Suite(&SigningSuite{}) +func TestSigning(t *testing.T) { + suite.Run(t, &signingSuite{}) } -type SigningSuite struct { +type signingSuite struct { + suite.Suite fingerprint string } +var _ = suite.SetupAllSuite(&signingSuite{}) + func findFingerprint(lineBytes []byte) (string, error) { lines := string(lineBytes) for _, line := range strings.Split(lines, "\n") { @@ -34,43 +39,41 @@ func findFingerprint(lineBytes []byte) (string, error) { return "", errors.New("No fingerprint found") } -func (s *SigningSuite) SetUpSuite(c *check.C) { +func (s *signingSuite) SetupSuite() { + t := s.T() _, err := exec.LookPath(skopeoBinary) - c.Assert(err, check.IsNil) + require.NoError(t, err) - gpgHome := c.MkDir() - os.Setenv("GNUPGHOME", gpgHome) + gpgHome := t.TempDir() + t.Setenv("GNUPGHOME", gpgHome) - runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", gpgHome, "--batch", "--gen-key") + runCommandWithInput(t, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", gpgHome, "--batch", "--gen-key") lines, err := exec.Command(gpgBinary, "--homedir", gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output() - c.Assert(err, check.IsNil) + require.NoError(t, err) s.fingerprint, err = findFingerprint(lines) - c.Assert(err, check.IsNil) -} - -func (s *SigningSuite) TearDownSuite(c *check.C) { - os.Unsetenv("GNUPGHOME") + require.NoError(t, err) } -func (s *SigningSuite) TestSignVerifySmoke(c *check.C) { +func (s *signingSuite) TestSignVerifySmoke() { + t := s.T() mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{}) - c.Assert(err, check.IsNil) + require.NoError(t, err) defer mech.Close() if err := mech.SupportsSigning(); err != nil { // FIXME? 
Test that verification and policy enforcement works, using signatures from fixtures - c.Skip(fmt.Sprintf("Signing not supported: %v", err)) + t.Skipf("Signing not supported: %v", err) } manifestPath := "fixtures/image.manifest.json" dockerReference := "testing/smoketest" sigOutput, err := os.CreateTemp("", "sig") - c.Assert(err, check.IsNil) + require.NoError(t, err) defer os.Remove(sigOutput.Name()) - assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", sigOutput.Name(), + assertSkopeoSucceeds(t, "^$", "standalone-sign", "-o", sigOutput.Name(), manifestPath, dockerReference, s.fingerprint) expected := fmt.Sprintf("^Signature verified, digest %s\n$", TestImageManifestDigest) - assertSkopeoSucceeds(c, expected, "standalone-verify", manifestPath, + assertSkopeoSucceeds(t, expected, "standalone-verify", manifestPath, dockerReference, s.fingerprint, sigOutput.Name()) } diff --git a/integration/sync_test.go b/integration/sync_test.go index 16569bafe7..cae4f84a42 100644 --- a/integration/sync_test.go +++ b/integration/sync_test.go @@ -9,13 +9,16 @@ import ( "path/filepath" "regexp" "strings" + "testing" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" ) const ( @@ -33,30 +36,36 @@ const ( pullableRepoWithLatestTag = "k8s.gcr.io/pause" ) -func init() { - check.Suite(&SyncSuite{}) +func TestSync(t *testing.T) { + suite.Run(t, &syncSuite{}) } -type SyncSuite struct { +type syncSuite struct { + suite.Suite cluster *openshiftCluster registry *testRegistryV2 } -func (s *SyncSuite) SetUpSuite(c *check.C) { +var _ = suite.SetupAllSuite(&syncSuite{}) +var _ = suite.TearDownAllSuite(&syncSuite{}) + +func (s *syncSuite) SetupSuite() { + t := s.T() + const registryAuth = false const registrySchema1 = false if os.Getenv("SKOPEO_LOCAL_TESTS") == "1" { - c.Log("Running tests without a container") + t.Log("Running tests without a container") fmt.Printf("NOTE: tests requires a V2 registry at url=%s, with auth=%t, schema1=%t \n", v2DockerRegistryURL, registryAuth, registrySchema1) return } if os.Getenv("SKOPEO_CONTAINER_TESTS") != "1" { - c.Skip("Not running in a container, refusing to affect user state") + t.Skip("Not running in a container, refusing to affect user state") } - s.cluster = startOpenshiftCluster(c) // FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place. + s.cluster = startOpenshiftCluster(t) // FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place. for _, stream := range []string{"unsigned", "personal", "official", "naming", "cosigned", "compression", "schema1", "schema2"} { isJSON := fmt.Sprintf(`{ @@ -67,41 +76,42 @@ func (s *SyncSuite) SetUpSuite(c *check.C) { }, "spec": {} }`, stream) - runCommandWithInput(c, isJSON, "oc", "create", "-f", "-") + runCommandWithInput(t, isJSON, "oc", "create", "-f", "-") } // FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place. 
- s.registry = setupRegistryV2At(c, v2DockerRegistryURL, registryAuth, registrySchema1) + s.registry = setupRegistryV2At(t, v2DockerRegistryURL, registryAuth, registrySchema1) - gpgHome := c.MkDir() - os.Setenv("GNUPGHOME", gpgHome) + gpgHome := t.TempDir() + t.Setenv("GNUPGHOME", gpgHome) for _, key := range []string{"personal", "official"} { batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%no-protection\n%%commit\n", key, key) - runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key") + runCommandWithInput(t, batchInput, gpgBinary, "--batch", "--gen-key") - out := combinedOutputOfCommand(c, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key)) + out := combinedOutputOfCommand(t, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key)) err := os.WriteFile(filepath.Join(gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)), []byte(out), 0600) - c.Assert(err, check.IsNil) + require.NoError(t, err) } } -func (s *SyncSuite) TearDownSuite(c *check.C) { +func (s *syncSuite) TearDownSuite() { + t := s.T() if os.Getenv("SKOPEO_LOCAL_TESTS") == "1" { return } if s.registry != nil { - s.registry.tearDown(c) + s.registry.tearDown(t) } if s.cluster != nil { - s.cluster.tearDown(c) + s.cluster.tearDown(t) } } -func assertNumberOfManifestsInSubdirs(c *check.C, dir string, expectedCount int) { +func assertNumberOfManifestsInSubdirs(t *testing.T, dir string, expectedCount int) { nManifests := 0 err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { @@ -113,156 +123,163 @@ func assertNumberOfManifestsInSubdirs(c *check.C, dir string, expectedCount int) } return nil }) - c.Assert(err, check.IsNil) - c.Assert(nManifests, check.Equals, expectedCount) + require.NoError(t, err) + assert.Equal(t, expectedCount, nManifests) } -func (s *SyncSuite) TestDocker2DirTagged(c *check.C) { - tmpDir := c.MkDir() +func (s *syncSuite) TestDocker2DirTagged() { + t := s.T() + tmpDir := t.TempDir() // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. image := pullableTaggedImage imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image)) - c.Assert(err, check.IsNil) + require.NoError(t, err) imagePath := imageRef.DockerReference().String() dir1 := path.Join(tmpDir, "dir1") dir2 := path.Join(tmpDir, "dir2") // sync docker => dir - assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1) + assertSkopeoSucceeds(t, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1) _, err = os.Stat(path.Join(dir1, imagePath, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) // copy docker => dir - assertSkopeoSucceeds(c, "", "copy", "docker://"+image, "dir:"+dir2) + assertSkopeoSucceeds(t, "", "copy", "docker://"+image, "dir:"+dir2) _, err = os.Stat(path.Join(dir2, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) - out := combinedOutputOfCommand(c, "diff", "-urN", path.Join(dir1, imagePath), dir2) - c.Assert(out, check.Equals, "") + out := combinedOutputOfCommand(t, "diff", "-urN", path.Join(dir1, imagePath), dir2) + assert.Equal(t, "", out) } -func (s *SyncSuite) TestDocker2DirTaggedAll(c *check.C) { - tmpDir := c.MkDir() +func (s *syncSuite) TestDocker2DirTaggedAll() { + t := s.T() + tmpDir := t.TempDir() // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. 
image := pullableTaggedManifestList imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image)) - c.Assert(err, check.IsNil) + require.NoError(t, err) imagePath := imageRef.DockerReference().String() dir1 := path.Join(tmpDir, "dir1") dir2 := path.Join(tmpDir, "dir2") // sync docker => dir - assertSkopeoSucceeds(c, "", "sync", "--all", "--scoped", "--src", "docker", "--dest", "dir", image, dir1) + assertSkopeoSucceeds(t, "", "sync", "--all", "--scoped", "--src", "docker", "--dest", "dir", image, dir1) _, err = os.Stat(path.Join(dir1, imagePath, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) // copy docker => dir - assertSkopeoSucceeds(c, "", "copy", "--all", "docker://"+image, "dir:"+dir2) + assertSkopeoSucceeds(t, "", "copy", "--all", "docker://"+image, "dir:"+dir2) _, err = os.Stat(path.Join(dir2, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) - out := combinedOutputOfCommand(c, "diff", "-urN", path.Join(dir1, imagePath), dir2) - c.Assert(out, check.Equals, "") + out := combinedOutputOfCommand(t, "diff", "-urN", path.Join(dir1, imagePath), dir2) + assert.Equal(t, "", out) } -func (s *SyncSuite) TestPreserveDigests(c *check.C) { - tmpDir := c.MkDir() +func (s *syncSuite) TestPreserveDigests() { + t := s.T() + tmpDir := t.TempDir() // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. image := pullableTaggedManifestList // copy docker => dir - assertSkopeoSucceeds(c, "", "copy", "--all", "--preserve-digests", "docker://"+image, "dir:"+tmpDir) + assertSkopeoSucceeds(t, "", "copy", "--all", "--preserve-digests", "docker://"+image, "dir:"+tmpDir) _, err := os.Stat(path.Join(tmpDir, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) - assertSkopeoFails(c, ".*Instructed to preserve digests.*", "copy", "--all", "--preserve-digests", "--format=oci", "docker://"+image, "dir:"+tmpDir) + assertSkopeoFails(t, ".*Instructed to preserve digests.*", "copy", "--all", "--preserve-digests", "--format=oci", "docker://"+image, "dir:"+tmpDir) } -func (s *SyncSuite) TestScoped(c *check.C) { +func (s *syncSuite) TestScoped() { + t := s.T() // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. image := pullableTaggedImage imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image)) - c.Assert(err, check.IsNil) + require.NoError(t, err) imagePath := imageRef.DockerReference().String() - dir1 := c.MkDir() - assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1) + dir1 := t.TempDir() + assertSkopeoSucceeds(t, "", "sync", "--src", "docker", "--dest", "dir", image, dir1) _, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) - assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1) + assertSkopeoSucceeds(t, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1) _, err = os.Stat(path.Join(dir1, imagePath, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) } -func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) { +func (s *syncSuite) TestDirIsNotOverwritten() { + t := s.T() // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. 
image := pullableRepoWithLatestTag imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image)) - c.Assert(err, check.IsNil) + require.NoError(t, err) imagePath := imageRef.DockerReference().String() // make a copy of the image in the local registry - assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image, "docker://"+path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference()))) + assertSkopeoSucceeds(t, "", "copy", "--dest-tls-verify=false", "docker://"+image, "docker://"+path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference()))) //sync upstream image to dir, not scoped - dir1 := c.MkDir() - assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1) + dir1 := t.TempDir() + assertSkopeoSucceeds(t, "", "sync", "--src", "docker", "--dest", "dir", image, dir1) _, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) //sync local registry image to dir, not scoped - assertSkopeoFails(c, ".*Refusing to overwrite destination directory.*", "sync", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1) + assertSkopeoFails(t, ".*Refusing to overwrite destination directory.*", "sync", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1) //sync local registry image to dir, scoped imageRef, err = docker.ParseReference(fmt.Sprintf("//%s", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())))) - c.Assert(err, check.IsNil) + require.NoError(t, err) imagePath = imageRef.DockerReference().String() - assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1) + assertSkopeoSucceeds(t, "", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1) _, err = os.Stat(path.Join(dir1, imagePath, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) } -func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) { - tmpDir := c.MkDir() +func (s *syncSuite) TestDocker2DirUntagged() { + t := s.T() + tmpDir := t.TempDir() // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. 
image := pullableRepo imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image)) - c.Assert(err, check.IsNil) + require.NoError(t, err) imagePath := imageRef.DockerReference().String() dir1 := path.Join(tmpDir, "dir1") - assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1) + assertSkopeoSucceeds(t, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1) sysCtx := types.SystemContext{} tags, err := docker.GetRepositoryTags(context.Background(), &sysCtx, imageRef) - c.Assert(err, check.IsNil) - c.Check(len(tags), check.Not(check.Equals), 0) + require.NoError(t, err) + assert.NotZero(t, len(tags)) nManifests, err := filepath.Glob(path.Join(dir1, path.Dir(imagePath), "*", "manifest.json")) - c.Assert(err, check.IsNil) - c.Assert(len(nManifests), check.Equals, len(tags)) + require.NoError(t, err) + assert.Len(t, nManifests, len(tags)) } -func (s *SyncSuite) TestYamlUntagged(c *check.C) { - tmpDir := c.MkDir() +func (s *syncSuite) TestYamlUntagged() { + t := s.T() + tmpDir := t.TempDir() dir1 := path.Join(tmpDir, "dir1") image := pullableRepo imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image)) - c.Assert(err, check.IsNil) + require.NoError(t, err) imagePath := imageRef.DockerReference().Name() sysCtx := types.SystemContext{} tags, err := docker.GetRepositoryTags(context.Background(), &sysCtx, imageRef) - c.Assert(err, check.IsNil) - c.Check(len(tags), check.Not(check.Equals), 0) + require.NoError(t, err) + assert.NotZero(t, len(tags)) yamlConfig := fmt.Sprintf(` %s: @@ -273,8 +290,8 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) { // sync to the local registry yamlFile := path.Join(tmpDir, "registries.yaml") err = os.WriteFile(yamlFile, []byte(yamlConfig), 0644) - c.Assert(err, check.IsNil) - assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "docker", "--dest-tls-verify=false", yamlFile, v2DockerRegistryURL) + require.NoError(t, err) + assertSkopeoSucceeds(t, "", "sync", "--scoped", "--src", "yaml", "--dest", "docker", "--dest-tls-verify=false", yamlFile, v2DockerRegistryURL) // sync back from local registry to a folder os.Remove(yamlFile) yamlConfig = fmt.Sprintf(` @@ -285,23 +302,24 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) { `, v2DockerRegistryURL, imagePath) err = os.WriteFile(yamlFile, []byte(yamlConfig), 0644) - c.Assert(err, check.IsNil) - assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1) + require.NoError(t, err) + assertSkopeoSucceeds(t, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1) sysCtx = types.SystemContext{ DockerInsecureSkipTLSVerify: types.NewOptionalBool(true), } localImageRef, err := docker.ParseReference(fmt.Sprintf("//%s/%s", v2DockerRegistryURL, imagePath)) - c.Assert(err, check.IsNil) + require.NoError(t, err) localTags, err := docker.GetRepositoryTags(context.Background(), &sysCtx, localImageRef) - c.Assert(err, check.IsNil) - c.Check(len(localTags), check.Not(check.Equals), 0) - c.Assert(len(localTags), check.Equals, len(tags)) - assertNumberOfManifestsInSubdirs(c, dir1, len(tags)) + require.NoError(t, err) + assert.NotZero(t, len(localTags)) + assert.Len(t, localTags, len(tags)) + assertNumberOfManifestsInSubdirs(t, dir1, len(tags)) } -func (s *SyncSuite) TestYamlRegex2Dir(c *check.C) { - tmpDir := c.MkDir() +func (s *syncSuite) TestYamlRegex2Dir() { + t := s.T() + tmpDir := t.TempDir() dir1 := path.Join(tmpDir, "dir1") yamlConfig := ` @@ -311,17 +329,18 @@ k8s.gcr.io: ` // 
the ↑ regex strings always matches only 2 images var nTags = 2 - c.Assert(nTags, check.Not(check.Equals), 0) + assert.NotZero(t, nTags) yamlFile := path.Join(tmpDir, "registries.yaml") err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644) - c.Assert(err, check.IsNil) - assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1) - assertNumberOfManifestsInSubdirs(c, dir1, nTags) + require.NoError(t, err) + assertSkopeoSucceeds(t, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1) + assertNumberOfManifestsInSubdirs(t, dir1, nTags) } -func (s *SyncSuite) TestYamlDigest2Dir(c *check.C) { - tmpDir := c.MkDir() +func (s *syncSuite) TestYamlDigest2Dir() { + t := s.T() + tmpDir := t.TempDir() dir1 := path.Join(tmpDir, "dir1") yamlConfig := ` @@ -332,13 +351,14 @@ k8s.gcr.io: ` yamlFile := path.Join(tmpDir, "registries.yaml") err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644) - c.Assert(err, check.IsNil) - assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1) - assertNumberOfManifestsInSubdirs(c, dir1, 1) + require.NoError(t, err) + assertSkopeoSucceeds(t, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1) + assertNumberOfManifestsInSubdirs(t, dir1, 1) } -func (s *SyncSuite) TestYaml2Dir(c *check.C) { - tmpDir := c.MkDir() +func (s *syncSuite) TestYaml2Dir() { + t := s.T() + tmpDir := t.TempDir() dir1 := path.Join(tmpDir, "dir1") yamlConfig := ` @@ -366,25 +386,26 @@ quay.io: nTags++ } } - c.Assert(nTags, check.Not(check.Equals), 0) + assert.NotZero(t, nTags) yamlFile := path.Join(tmpDir, "registries.yaml") err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644) - c.Assert(err, check.IsNil) - assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1) - assertNumberOfManifestsInSubdirs(c, dir1, nTags) + require.NoError(t, err) + assertSkopeoSucceeds(t, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1) + assertNumberOfManifestsInSubdirs(t, dir1, nTags) } -func (s *SyncSuite) TestYamlTLSVerify(c *check.C) { +func (s *syncSuite) TestYamlTLSVerify() { + t := s.T() const localRegURL = "docker://" + v2DockerRegistryURL + "/" - tmpDir := c.MkDir() + tmpDir := t.TempDir() dir1 := path.Join(tmpDir, "dir1") image := pullableRepoWithLatestTag tag := "latest" // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. 
// copy docker => docker - assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image+":"+tag, localRegURL+image+":"+tag) + assertSkopeoSucceeds(t, "", "copy", "--dest-tls-verify=false", "docker://"+image+":"+tag, localRegURL+image+":"+tag) yamlTemplate := ` %s: @@ -396,7 +417,7 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) { testCfg := []struct { tlsVerify string msg string - checker func(c *check.C, regexp string, args ...string) + checker func(t *testing.T, regexp string, args ...string) }{ { tlsVerify: "tls-verify: false", @@ -420,17 +441,18 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) { yamlConfig := fmt.Sprintf(yamlTemplate, v2DockerRegistryURL, cfg.tlsVerify, image, tag) yamlFile := path.Join(tmpDir, "registries.yaml") err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644) - c.Assert(err, check.IsNil) + require.NoError(t, err) - cfg.checker(c, cfg.msg, "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1) + cfg.checker(t, cfg.msg, "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1) os.Remove(yamlFile) os.RemoveAll(dir1) } } -func (s *SyncSuite) TestSyncManifestOutput(c *check.C) { - tmpDir := c.MkDir() +func (s *syncSuite) TestSyncManifestOutput() { + t := s.T() + tmpDir := t.TempDir() destDir1 := filepath.Join(tmpDir, "dest1") destDir2 := filepath.Join(tmpDir, "dest2") @@ -439,154 +461,162 @@ func (s *SyncSuite) TestSyncManifestOutput(c *check.C) { //Split image:tag path from image URI for manifest comparison imageDir := pullableTaggedImage[strings.LastIndex(pullableTaggedImage, "/")+1:] - assertSkopeoSucceeds(c, "", "sync", "--format=oci", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir1) - verifyManifestMIMEType(c, filepath.Join(destDir1, imageDir), imgspecv1.MediaTypeImageManifest) - assertSkopeoSucceeds(c, "", "sync", "--format=v2s2", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir2) - verifyManifestMIMEType(c, filepath.Join(destDir2, imageDir), manifest.DockerV2Schema2MediaType) - assertSkopeoSucceeds(c, "", "sync", "--format=v2s1", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir3) - verifyManifestMIMEType(c, filepath.Join(destDir3, imageDir), manifest.DockerV2Schema1SignedMediaType) + assertSkopeoSucceeds(t, "", "sync", "--format=oci", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir1) + verifyManifestMIMEType(t, filepath.Join(destDir1, imageDir), imgspecv1.MediaTypeImageManifest) + assertSkopeoSucceeds(t, "", "sync", "--format=v2s2", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir2) + verifyManifestMIMEType(t, filepath.Join(destDir2, imageDir), manifest.DockerV2Schema2MediaType) + assertSkopeoSucceeds(t, "", "sync", "--format=v2s1", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir3) + verifyManifestMIMEType(t, filepath.Join(destDir3, imageDir), manifest.DockerV2Schema1SignedMediaType) } -func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) { +func (s *syncSuite) TestDocker2DockerTagged() { + t := s.T() const localRegURL = "docker://" + v2DockerRegistryURL + "/" - tmpDir := c.MkDir() + tmpDir := t.TempDir() // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. 
image := pullableTaggedImage imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image)) - c.Assert(err, check.IsNil) + require.NoError(t, err) imagePath := imageRef.DockerReference().String() dir1 := path.Join(tmpDir, "dir1") dir2 := path.Join(tmpDir, "dir2") // sync docker => docker - assertSkopeoSucceeds(c, "", "sync", "--scoped", "--dest-tls-verify=false", "--src", "docker", "--dest", "docker", image, v2DockerRegistryURL) + assertSkopeoSucceeds(t, "", "sync", "--scoped", "--dest-tls-verify=false", "--src", "docker", "--dest", "docker", image, v2DockerRegistryURL) // copy docker => dir - assertSkopeoSucceeds(c, "", "copy", "docker://"+image, "dir:"+dir1) + assertSkopeoSucceeds(t, "", "copy", "docker://"+image, "dir:"+dir1) _, err = os.Stat(path.Join(dir1, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) // copy docker => dir - assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", localRegURL+imagePath, "dir:"+dir2) + assertSkopeoSucceeds(t, "", "copy", "--src-tls-verify=false", localRegURL+imagePath, "dir:"+dir2) _, err = os.Stat(path.Join(dir2, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) - out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2) - c.Assert(out, check.Equals, "") + out := combinedOutputOfCommand(t, "diff", "-urN", dir1, dir2) + assert.Equal(t, "", out) } -func (s *SyncSuite) TestDir2DockerTagged(c *check.C) { +func (s *syncSuite) TestDir2DockerTagged() { + t := s.T() const localRegURL = "docker://" + v2DockerRegistryURL + "/" - tmpDir := c.MkDir() + tmpDir := t.TempDir() // FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection. image := pullableRepoWithLatestTag dir1 := path.Join(tmpDir, "dir1") err := os.Mkdir(dir1, 0755) - c.Assert(err, check.IsNil) + require.NoError(t, err) dir2 := path.Join(tmpDir, "dir2") err = os.Mkdir(dir2, 0755) - c.Assert(err, check.IsNil) + require.NoError(t, err) // create leading dirs err = os.MkdirAll(path.Dir(path.Join(dir1, image)), 0755) - c.Assert(err, check.IsNil) + require.NoError(t, err) // copy docker => dir - assertSkopeoSucceeds(c, "", "copy", "docker://"+image, "dir:"+path.Join(dir1, image)) + assertSkopeoSucceeds(t, "", "copy", "docker://"+image, "dir:"+path.Join(dir1, image)) _, err = os.Stat(path.Join(dir1, image, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) // sync dir => docker - assertSkopeoSucceeds(c, "", "sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", dir1, v2DockerRegistryURL) + assertSkopeoSucceeds(t, "", "sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", dir1, v2DockerRegistryURL) // create leading dirs err = os.MkdirAll(path.Dir(path.Join(dir2, image)), 0755) - c.Assert(err, check.IsNil) + require.NoError(t, err) // copy docker => dir - assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", localRegURL+image, "dir:"+path.Join(dir2, image)) + assertSkopeoSucceeds(t, "", "copy", "--src-tls-verify=false", localRegURL+image, "dir:"+path.Join(dir2, image)) _, err = os.Stat(path.Join(dir2, image, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) - out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2) - c.Assert(out, check.Equals, "") + out := combinedOutputOfCommand(t, "diff", "-urN", dir1, dir2) + assert.Equal(t, "", out) } -func (s *SyncSuite) TestFailsWithDir2Dir(c *check.C) { - tmpDir := c.MkDir() +func (s *syncSuite) TestFailsWithDir2Dir() { + t := s.T() + tmpDir 
:= t.TempDir() dir1 := path.Join(tmpDir, "dir1") dir2 := path.Join(tmpDir, "dir2") // sync dir => dir is not allowed - assertSkopeoFails(c, ".*sync from 'dir' to 'dir' not implemented.*", "sync", "--scoped", "--src", "dir", "--dest", "dir", dir1, dir2) + assertSkopeoFails(t, ".*sync from 'dir' to 'dir' not implemented.*", "sync", "--scoped", "--src", "dir", "--dest", "dir", dir1, dir2) } -func (s *SyncSuite) TestFailsNoSourceImages(c *check.C) { - tmpDir := c.MkDir() +func (s *syncSuite) TestFailsNoSourceImages() { + t := s.T() + tmpDir := t.TempDir() - assertSkopeoFails(c, ".*No images to sync found in .*", + assertSkopeoFails(t, ".*No images to sync found in .*", "sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", tmpDir, v2DockerRegistryURL) - assertSkopeoFails(c, ".*Error determining repository tags for repo docker.io/library/hopefully_no_images_will_ever_be_called_like_this: fetching tags list: requested access to the resource is denied.*", + assertSkopeoFails(t, ".*Error determining repository tags for repo docker.io/library/hopefully_no_images_will_ever_be_called_like_this: fetching tags list: requested access to the resource is denied.*", "sync", "--scoped", "--dest-tls-verify=false", "--src", "docker", "--dest", "docker", "hopefully_no_images_will_ever_be_called_like_this", v2DockerRegistryURL) } -func (s *SyncSuite) TestFailsWithDockerSourceNoRegistry(c *check.C) { +func (s *syncSuite) TestFailsWithDockerSourceNoRegistry() { + t := s.T() const regURL = "google.com/namespace/imagename" - tmpDir := c.MkDir() + tmpDir := t.TempDir() //untagged - assertSkopeoFails(c, ".*StatusCode: 404.*", + assertSkopeoFails(t, ".*StatusCode: 404.*", "sync", "--scoped", "--src", "docker", "--dest", "dir", regURL, tmpDir) //tagged - assertSkopeoFails(c, ".*StatusCode: 404.*", + assertSkopeoFails(t, ".*StatusCode: 404.*", "sync", "--scoped", "--src", "docker", "--dest", "dir", regURL+":thetag", tmpDir) } -func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) { +func (s *syncSuite) TestFailsWithDockerSourceUnauthorized() { + t := s.T() const repo = "privateimagenamethatshouldnotbepublic" - tmpDir := c.MkDir() + tmpDir := t.TempDir() //untagged - assertSkopeoFails(c, ".*requested access to the resource is denied.*", + assertSkopeoFails(t, ".*requested access to the resource is denied.*", "sync", "--scoped", "--src", "docker", "--dest", "dir", repo, tmpDir) //tagged - assertSkopeoFails(c, ".*requested access to the resource is denied.*", + assertSkopeoFails(t, ".*requested access to the resource is denied.*", "sync", "--scoped", "--src", "docker", "--dest", "dir", repo+":thetag", tmpDir) } -func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) { +func (s *syncSuite) TestFailsWithDockerSourceNotExisting() { + t := s.T() repo := path.Join(v2DockerRegistryURL, "imagedoesnotexist") - tmpDir := c.MkDir() + tmpDir := t.TempDir() //untagged - assertSkopeoFails(c, ".*repository name not known to registry.*", + assertSkopeoFails(t, ".*repository name not known to registry.*", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", repo, tmpDir) //tagged - assertSkopeoFails(c, ".*reading manifest.*", + assertSkopeoFails(t, ".*reading manifest.*", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", repo+":thetag", tmpDir) } -func (s *SyncSuite) TestFailsWithDirSourceNotExisting(c *check.C) { +func (s *syncSuite) TestFailsWithDirSourceNotExisting() { + t := s.T() // Make sure the dir does not exist! 
- tmpDir := c.MkDir() + tmpDir := t.TempDir() tmpDir = filepath.Join(tmpDir, "this-does-not-exist") err := os.RemoveAll(tmpDir) - c.Assert(err, check.IsNil) + require.NoError(t, err) _, err = os.Stat(path.Join(tmpDir)) - c.Check(os.IsNotExist(err), check.Equals, true) + assert.True(t, os.IsNotExist(err)) - assertSkopeoFails(c, ".*no such file or directory.*", + assertSkopeoFails(t, ".*no such file or directory.*", "sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", tmpDir, v2DockerRegistryURL) } diff --git a/integration/utils.go b/integration/utils.go index b1e049c41d..128127eaf2 100644 --- a/integration/utils.go +++ b/integration/utils.go @@ -9,10 +9,12 @@ import ( "os/exec" "path/filepath" "strings" + "testing" "time" "github.com/containers/image/v5/manifest" - "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const skopeoBinary = "skopeo" @@ -22,19 +24,19 @@ const testFQIN = "docker://quay.io/libpod/busybox" // tag left off on purpose, s const testFQIN64 = "docker://quay.io/libpod/busybox:amd64" const testFQINMultiLayer = "docker://quay.io/libpod/alpine_nginx:latest" // multi-layer -// consumeAndLogOutputStream takes (f, err) from an exec.*Pipe(), and causes all output to it to be logged to c. -func consumeAndLogOutputStream(c *check.C, id string, f io.ReadCloser, err error) { - c.Assert(err, check.IsNil) +// consumeAndLogOutputStream takes (f, err) from an exec.*Pipe(), and causes all output to it to be logged to t. +func consumeAndLogOutputStream(t *testing.T, id string, f io.ReadCloser, err error) { + require.NoError(t, err) go func() { defer func() { f.Close() - c.Logf("Output %s: Closed", id) + t.Logf("Output %s: Closed", id) }() buf := make([]byte, 1024) for { - c.Logf("Output %s: waiting", id) + t.Logf("Output %s: waiting", id) n, err := f.Read(buf) - c.Logf("Output %s: got %d,%#v: %s", id, n, err, strings.TrimSuffix(string(buf[:n]), "\n")) + t.Logf("Output %s: got %d,%#v: %s", id, n, err, strings.TrimSuffix(string(buf[:n]), "\n")) if n <= 0 { break } @@ -43,66 +45,66 @@ func consumeAndLogOutputStream(c *check.C, id string, f io.ReadCloser, err error } // consumeAndLogOutputs causes all output to stdout and stderr from an *exec.Cmd to be logged to c. -func consumeAndLogOutputs(c *check.C, id string, cmd *exec.Cmd) { +func consumeAndLogOutputs(t *testing.T, id string, cmd *exec.Cmd) { stdout, err := cmd.StdoutPipe() - consumeAndLogOutputStream(c, id+" stdout", stdout, err) + consumeAndLogOutputStream(t, id+" stdout", stdout, err) stderr, err := cmd.StderrPipe() - consumeAndLogOutputStream(c, id+" stderr", stderr, err) + consumeAndLogOutputStream(t, id+" stderr", stderr, err) } // combinedOutputOfCommand runs a command as if exec.Command().CombinedOutput(), verifies that the exit status is 0, and returns the output, // or terminates c on failure. 
-func combinedOutputOfCommand(c *check.C, name string, args ...string) string { - c.Logf("Running %s %s", name, strings.Join(args, " ")) +func combinedOutputOfCommand(t *testing.T, name string, args ...string) string { + t.Logf("Running %s %s", name, strings.Join(args, " ")) out, err := exec.Command(name, args...).CombinedOutput() - c.Assert(err, check.IsNil, check.Commentf("%s", out)) + require.NoError(t, err, "%s", out) return string(out) } // assertSkopeoSucceeds runs a skopeo command as if exec.Command().CombinedOutput, verifies that the exit status is 0, // and optionally that the output matches a multi-line regexp if it is nonempty; // or terminates c on failure -func assertSkopeoSucceeds(c *check.C, regexp string, args ...string) { - c.Logf("Running %s %s", skopeoBinary, strings.Join(args, " ")) +func assertSkopeoSucceeds(t *testing.T, regexp string, args ...string) { + t.Logf("Running %s %s", skopeoBinary, strings.Join(args, " ")) out, err := exec.Command(skopeoBinary, args...).CombinedOutput() - c.Assert(err, check.IsNil, check.Commentf("%s", out)) + assert.NoError(t, err, "%s", out) if regexp != "" { - c.Assert(string(out), check.Matches, "(?s)"+regexp) // (?s) : '.' will also match newlines + assert.Regexp(t, "(?s)"+regexp, string(out)) // (?s) : '.' will also match newlines } } // assertSkopeoFails runs a skopeo command as if exec.Command().CombinedOutput, verifies that the exit status is 0, // and that the output matches a multi-line regexp; // or terminates c on failure -func assertSkopeoFails(c *check.C, regexp string, args ...string) { - c.Logf("Running %s %s", skopeoBinary, strings.Join(args, " ")) +func assertSkopeoFails(t *testing.T, regexp string, args ...string) { + t.Logf("Running %s %s", skopeoBinary, strings.Join(args, " ")) out, err := exec.Command(skopeoBinary, args...).CombinedOutput() - c.Assert(err, check.NotNil, check.Commentf("%s", out)) - c.Assert(string(out), check.Matches, "(?s)"+regexp) // (?s) : '.' will also match newlines + assert.Error(t, err, "%s", out) + assert.Regexp(t, "(?s)"+regexp, string(out)) // (?s) : '.' will also match newlines } // runCommandWithInput runs a command as if exec.Command(), sending it the input to stdin, // and verifies that the exit status is 0, or terminates c on failure. -func runCommandWithInput(c *check.C, input string, name string, args ...string) { +func runCommandWithInput(t *testing.T, input string, name string, args ...string) { cmd := exec.Command(name, args...) - runExecCmdWithInput(c, cmd, input) + runExecCmdWithInput(t, cmd, input) } // runExecCmdWithInput runs an exec.Cmd, sending it the input to stdin, // and verifies that the exit status is 0, or terminates c on failure. 
-func runExecCmdWithInput(c *check.C, cmd *exec.Cmd, input string) { - c.Logf("Running %s %s", cmd.Path, strings.Join(cmd.Args, " ")) - consumeAndLogOutputs(c, cmd.Path+" "+strings.Join(cmd.Args, " "), cmd) +func runExecCmdWithInput(t *testing.T, cmd *exec.Cmd, input string) { + t.Logf("Running %s %s", cmd.Path, strings.Join(cmd.Args, " ")) + consumeAndLogOutputs(t, cmd.Path+" "+strings.Join(cmd.Args, " "), cmd) stdin, err := cmd.StdinPipe() - c.Assert(err, check.IsNil) + require.NoError(t, err) err = cmd.Start() - c.Assert(err, check.IsNil) + require.NoError(t, err) _, err = stdin.Write([]byte(input)) - c.Assert(err, check.IsNil) + require.NoError(t, err) err = stdin.Close() - c.Assert(err, check.IsNil) + require.NoError(t, err) err = cmd.Wait() - c.Assert(err, check.IsNil) + assert.NoError(t, err) } // isPortOpen returns true iff the specified port on localhost is open. @@ -120,29 +122,29 @@ func isPortOpen(port uint16) bool { // The checking can be aborted by sending a value to the terminate channel, which the caller should // always do using // defer func() {terminate <- true}() -func newPortChecker(c *check.C, port uint16) (portOpen <-chan bool, terminate chan<- bool) { +func newPortChecker(t *testing.T, port uint16) (portOpen <-chan bool, terminate chan<- bool) { portOpenBidi := make(chan bool) // Buffered, so that sending a terminate request after the goroutine has exited does not block. terminateBidi := make(chan bool, 1) go func() { defer func() { - c.Logf("Port checker for port %d exiting", port) + t.Logf("Port checker for port %d exiting", port) }() for { - c.Logf("Checking for port %d...", port) + t.Logf("Checking for port %d...", port) if isPortOpen(port) { - c.Logf("Port %d open", port) + t.Logf("Port %d open", port) portOpenBidi <- true return } - c.Logf("Sleeping for port %d", port) + t.Logf("Sleeping for port %d", port) sleepChan := time.After(100 * time.Millisecond) select { case <-sleepChan: // Try again - c.Logf("Sleeping for port %d done, will retry", port) + t.Logf("Sleeping for port %d done, will retry", port) case <-terminateBidi: - c.Logf("Check for port %d terminated", port) + t.Logf("Check for port %d terminated", port) return } } @@ -164,54 +166,54 @@ func modifyEnviron(env []string, name, value string) []string { // fileFromFixtureFixture applies edits to inputPath and returns a path to the temporary file. // Callers should defer os.Remove(the_returned_path) -func fileFromFixture(c *check.C, inputPath string, edits map[string]string) string { +func fileFromFixture(t *testing.T, inputPath string, edits map[string]string) string { contents, err := os.ReadFile(inputPath) - c.Assert(err, check.IsNil) + require.NoError(t, err) for template, value := range edits { updated := bytes.ReplaceAll(contents, []byte(template), []byte(value)) - c.Assert(bytes.Equal(updated, contents), check.Equals, false, check.Commentf("Replacing %s in %#v failed", template, string(contents))) // Verify that the template has matched something and we are not silently ignoring it. + require.NotEqual(t, contents, updated, "Replacing %s in %#v failed", template, string(contents)) // Verify that the template has matched something and we are not silently ignoring it. 
contents = updated } file, err := os.CreateTemp("", "policy.json") - c.Assert(err, check.IsNil) + require.NoError(t, err) path := file.Name() _, err = file.Write(contents) - c.Assert(err, check.IsNil) + require.NoError(t, err) err = file.Close() - c.Assert(err, check.IsNil) + require.NoError(t, err) return path } // runDecompressDirs runs decompress-dirs.sh using exec.Command().CombinedOutput, verifies that the exit status is 0, // and optionally that the output matches a multi-line regexp if it is nonempty; or terminates c on failure -func runDecompressDirs(c *check.C, regexp string, args ...string) { - c.Logf("Running %s %s", decompressDirsBinary, strings.Join(args, " ")) +func runDecompressDirs(t *testing.T, regexp string, args ...string) { + t.Logf("Running %s %s", decompressDirsBinary, strings.Join(args, " ")) for i, dir := range args { m, err := os.ReadFile(filepath.Join(dir, "manifest.json")) - c.Assert(err, check.IsNil) - c.Logf("manifest %d before: %s", i+1, string(m)) + require.NoError(t, err) + t.Logf("manifest %d before: %s", i+1, string(m)) } out, err := exec.Command(decompressDirsBinary, args...).CombinedOutput() - c.Assert(err, check.IsNil, check.Commentf("%s", out)) + assert.NoError(t, err, "%s", out) for i, dir := range args { if len(out) > 0 { - c.Logf("output: %s", out) + t.Logf("output: %s", out) } m, err := os.ReadFile(filepath.Join(dir, "manifest.json")) - c.Assert(err, check.IsNil) - c.Logf("manifest %d after: %s", i+1, string(m)) + require.NoError(t, err) + t.Logf("manifest %d after: %s", i+1, string(m)) } if regexp != "" { - c.Assert(string(out), check.Matches, "(?s)"+regexp) // (?s) : '.' will also match newlines + assert.Regexp(t, "(?s)"+regexp, string(out)) // (?s) : '.' will also match newlines } } // Verify manifest in a dir: image at dir is expectedMIMEType. -func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) { +func verifyManifestMIMEType(t *testing.T, dir string, expectedMIMEType string) { manifestBlob, err := os.ReadFile(filepath.Join(dir, "manifest.json")) - c.Assert(err, check.IsNil) + require.NoError(t, err) mimeType := manifest.GuessMIMEType(manifestBlob) - c.Assert(mimeType, check.Equals, expectedMIMEType) + assert.Equal(t, expectedMIMEType, mimeType) } diff --git a/vendor/github.com/kr/pretty/.gitignore b/vendor/github.com/kr/pretty/.gitignore deleted file mode 100644 index 1f0a99f2f2..0000000000 --- a/vendor/github.com/kr/pretty/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -[568].out -_go* -_test* -_obj diff --git a/vendor/github.com/kr/pretty/License b/vendor/github.com/kr/pretty/License deleted file mode 100644 index 480a328059..0000000000 --- a/vendor/github.com/kr/pretty/License +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2012 Keith Rarick - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/kr/pretty/Readme b/vendor/github.com/kr/pretty/Readme deleted file mode 100644 index c589fc622b..0000000000 --- a/vendor/github.com/kr/pretty/Readme +++ /dev/null @@ -1,9 +0,0 @@ -package pretty - - import "github.com/kr/pretty" - - Package pretty provides pretty-printing for Go values. - -Documentation - - http://godoc.org/github.com/kr/pretty diff --git a/vendor/github.com/kr/pretty/diff.go b/vendor/github.com/kr/pretty/diff.go deleted file mode 100644 index 40a09dc648..0000000000 --- a/vendor/github.com/kr/pretty/diff.go +++ /dev/null @@ -1,295 +0,0 @@ -package pretty - -import ( - "fmt" - "io" - "reflect" -) - -type sbuf []string - -func (p *sbuf) Printf(format string, a ...interface{}) { - s := fmt.Sprintf(format, a...) - *p = append(*p, s) -} - -// Diff returns a slice where each element describes -// a difference between a and b. -func Diff(a, b interface{}) (desc []string) { - Pdiff((*sbuf)(&desc), a, b) - return desc -} - -// wprintfer calls Fprintf on w for each Printf call -// with a trailing newline. -type wprintfer struct{ w io.Writer } - -func (p *wprintfer) Printf(format string, a ...interface{}) { - fmt.Fprintf(p.w, format+"\n", a...) -} - -// Fdiff writes to w a description of the differences between a and b. -func Fdiff(w io.Writer, a, b interface{}) { - Pdiff(&wprintfer{w}, a, b) -} - -type Printfer interface { - Printf(format string, a ...interface{}) -} - -// Pdiff prints to p a description of the differences between a and b. -// It calls Printf once for each difference, with no trailing newline. -// The standard library log.Logger is a Printfer. -func Pdiff(p Printfer, a, b interface{}) { - d := diffPrinter{ - w: p, - aVisited: make(map[visit]visit), - bVisited: make(map[visit]visit), - } - d.diff(reflect.ValueOf(a), reflect.ValueOf(b)) -} - -type Logfer interface { - Logf(format string, a ...interface{}) -} - -// logprintfer calls Fprintf on w for each Printf call -// with a trailing newline. -type logprintfer struct{ l Logfer } - -func (p *logprintfer) Printf(format string, a ...interface{}) { - p.l.Logf(format, a...) -} - -// Ldiff prints to l a description of the differences between a and b. -// It calls Logf once for each difference, with no trailing newline. -// The standard library testing.T and testing.B are Logfers. -func Ldiff(l Logfer, a, b interface{}) { - Pdiff(&logprintfer{l}, a, b) -} - -type diffPrinter struct { - w Printfer - l string // label - - aVisited map[visit]visit - bVisited map[visit]visit -} - -func (w diffPrinter) printf(f string, a ...interface{}) { - var l string - if w.l != "" { - l = w.l + ": " - } - w.w.Printf(l+f, a...) -} - -func (w diffPrinter) diff(av, bv reflect.Value) { - if !av.IsValid() && bv.IsValid() { - w.printf("nil != %# v", formatter{v: bv, quote: true}) - return - } - if av.IsValid() && !bv.IsValid() { - w.printf("%# v != nil", formatter{v: av, quote: true}) - return - } - if !av.IsValid() && !bv.IsValid() { - return - } - - at := av.Type() - bt := bv.Type() - if at != bt { - w.printf("%v != %v", at, bt) - return - } - - if av.CanAddr() && bv.CanAddr() { - avis := visit{av.UnsafeAddr(), at} - bvis := visit{bv.UnsafeAddr(), bt} - var cycle bool - - // Have we seen this value before? 
- if vis, ok := w.aVisited[avis]; ok { - cycle = true - if vis != bvis { - w.printf("%# v (previously visited) != %# v", formatter{v: av, quote: true}, formatter{v: bv, quote: true}) - } - } else if _, ok := w.bVisited[bvis]; ok { - cycle = true - w.printf("%# v != %# v (previously visited)", formatter{v: av, quote: true}, formatter{v: bv, quote: true}) - } - w.aVisited[avis] = bvis - w.bVisited[bvis] = avis - if cycle { - return - } - } - - switch kind := at.Kind(); kind { - case reflect.Bool: - if a, b := av.Bool(), bv.Bool(); a != b { - w.printf("%v != %v", a, b) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if a, b := av.Int(), bv.Int(); a != b { - w.printf("%d != %d", a, b) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - if a, b := av.Uint(), bv.Uint(); a != b { - w.printf("%d != %d", a, b) - } - case reflect.Float32, reflect.Float64: - if a, b := av.Float(), bv.Float(); a != b { - w.printf("%v != %v", a, b) - } - case reflect.Complex64, reflect.Complex128: - if a, b := av.Complex(), bv.Complex(); a != b { - w.printf("%v != %v", a, b) - } - case reflect.Array: - n := av.Len() - for i := 0; i < n; i++ { - w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) - } - case reflect.Chan, reflect.Func, reflect.UnsafePointer: - if a, b := av.Pointer(), bv.Pointer(); a != b { - w.printf("%#x != %#x", a, b) - } - case reflect.Interface: - w.diff(av.Elem(), bv.Elem()) - case reflect.Map: - ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys()) - for _, k := range ak { - w := w.relabel(fmt.Sprintf("[%#v]", k)) - w.printf("%q != (missing)", av.MapIndex(k)) - } - for _, k := range both { - w := w.relabel(fmt.Sprintf("[%#v]", k)) - w.diff(av.MapIndex(k), bv.MapIndex(k)) - } - for _, k := range bk { - w := w.relabel(fmt.Sprintf("[%#v]", k)) - w.printf("(missing) != %q", bv.MapIndex(k)) - } - case reflect.Ptr: - switch { - case av.IsNil() && !bv.IsNil(): - w.printf("nil != %# v", formatter{v: bv, quote: true}) - case !av.IsNil() && bv.IsNil(): - w.printf("%# v != nil", formatter{v: av, quote: true}) - case !av.IsNil() && !bv.IsNil(): - w.diff(av.Elem(), bv.Elem()) - } - case reflect.Slice: - lenA := av.Len() - lenB := bv.Len() - if lenA != lenB { - w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB) - break - } - for i := 0; i < lenA; i++ { - w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) - } - case reflect.String: - if a, b := av.String(), bv.String(); a != b { - w.printf("%q != %q", a, b) - } - case reflect.Struct: - for i := 0; i < av.NumField(); i++ { - w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i)) - } - default: - panic("unknown reflect Kind: " + kind.String()) - } -} - -func (d diffPrinter) relabel(name string) (d1 diffPrinter) { - d1 = d - if d.l != "" && name[0] != '[' { - d1.l += "." - } - d1.l += name - return d1 -} - -// keyEqual compares a and b for equality. -// Both a and b must be valid map keys. 
-func keyEqual(av, bv reflect.Value) bool { - if !av.IsValid() && !bv.IsValid() { - return true - } - if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() { - return false - } - switch kind := av.Kind(); kind { - case reflect.Bool: - a, b := av.Bool(), bv.Bool() - return a == b - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - a, b := av.Int(), bv.Int() - return a == b - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - a, b := av.Uint(), bv.Uint() - return a == b - case reflect.Float32, reflect.Float64: - a, b := av.Float(), bv.Float() - return a == b - case reflect.Complex64, reflect.Complex128: - a, b := av.Complex(), bv.Complex() - return a == b - case reflect.Array: - for i := 0; i < av.Len(); i++ { - if !keyEqual(av.Index(i), bv.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.UnsafePointer, reflect.Ptr: - a, b := av.Pointer(), bv.Pointer() - return a == b - case reflect.Interface: - return keyEqual(av.Elem(), bv.Elem()) - case reflect.String: - a, b := av.String(), bv.String() - return a == b - case reflect.Struct: - for i := 0; i < av.NumField(); i++ { - if !keyEqual(av.Field(i), bv.Field(i)) { - return false - } - } - return true - default: - panic("invalid map key type " + av.Type().String()) - } -} - -func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) { - for _, av := range a { - inBoth := false - for _, bv := range b { - if keyEqual(av, bv) { - inBoth = true - both = append(both, av) - break - } - } - if !inBoth { - ak = append(ak, av) - } - } - for _, bv := range b { - inBoth := false - for _, av := range a { - if keyEqual(av, bv) { - inBoth = true - break - } - } - if !inBoth { - bk = append(bk, bv) - } - } - return -} diff --git a/vendor/github.com/kr/pretty/formatter.go b/vendor/github.com/kr/pretty/formatter.go deleted file mode 100644 index 249f089ef0..0000000000 --- a/vendor/github.com/kr/pretty/formatter.go +++ /dev/null @@ -1,336 +0,0 @@ -package pretty - -import ( - "fmt" - "io" - "reflect" - "strconv" - "text/tabwriter" - - "github.com/kr/text" - "github.com/rogpeppe/go-internal/fmtsort" -) - -type formatter struct { - v reflect.Value - force bool - quote bool -} - -// Formatter makes a wrapper, f, that will format x as go source with line -// breaks and tabs. Object f responds to the "%v" formatting verb when both the -// "#" and " " (space) flags are set, for example: -// -// fmt.Sprintf("%# v", Formatter(x)) -// -// If one of these two flags is not set, or any other verb is used, f will -// format x according to the usual rules of package fmt. -// In particular, if x satisfies fmt.Formatter, then x.Format will be called. 
-func Formatter(x interface{}) (f fmt.Formatter) { - return formatter{v: reflect.ValueOf(x), quote: true} -} - -func (fo formatter) String() string { - return fmt.Sprint(fo.v.Interface()) // unwrap it -} - -func (fo formatter) passThrough(f fmt.State, c rune) { - s := "%" - for i := 0; i < 128; i++ { - if f.Flag(i) { - s += string(rune(i)) - } - } - if w, ok := f.Width(); ok { - s += fmt.Sprintf("%d", w) - } - if p, ok := f.Precision(); ok { - s += fmt.Sprintf(".%d", p) - } - s += string(c) - fmt.Fprintf(f, s, fo.v.Interface()) -} - -func (fo formatter) Format(f fmt.State, c rune) { - if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') { - w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0) - p := &printer{tw: w, Writer: w, visited: make(map[visit]int)} - p.printValue(fo.v, true, fo.quote) - w.Flush() - return - } - fo.passThrough(f, c) -} - -type printer struct { - io.Writer - tw *tabwriter.Writer - visited map[visit]int - depth int -} - -func (p *printer) indent() *printer { - q := *p - q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0) - q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'}) - return &q -} - -func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) { - if showType { - io.WriteString(p, v.Type().String()) - fmt.Fprintf(p, "(%#v)", x) - } else { - fmt.Fprintf(p, "%#v", x) - } -} - -// printValue must keep track of already-printed pointer values to avoid -// infinite recursion. -type visit struct { - v uintptr - typ reflect.Type -} - -func (p *printer) printValue(v reflect.Value, showType, quote bool) { - if p.depth > 10 { - io.WriteString(p, "!%v(DEPTH EXCEEDED)") - return - } - - if v.IsValid() && v.CanInterface() { - i := v.Interface() - if goStringer, ok := i.(fmt.GoStringer); ok { - io.WriteString(p, goStringer.GoString()) - return - } - } - - switch v.Kind() { - case reflect.Bool: - p.printInline(v, v.Bool(), showType) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p.printInline(v, v.Int(), showType) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p.printInline(v, v.Uint(), showType) - case reflect.Float32, reflect.Float64: - p.printInline(v, v.Float(), showType) - case reflect.Complex64, reflect.Complex128: - fmt.Fprintf(p, "%#v", v.Complex()) - case reflect.String: - p.fmtString(v.String(), quote) - case reflect.Map: - t := v.Type() - if showType { - io.WriteString(p, t.String()) - } - writeByte(p, '{') - if nonzero(v) { - expand := !canInline(v.Type()) - pp := p - if expand { - writeByte(p, '\n') - pp = p.indent() - } - sm := fmtsort.Sort(v) - for i := 0; i < v.Len(); i++ { - k := sm.Key[i] - mv := sm.Value[i] - pp.printValue(k, false, true) - writeByte(pp, ':') - if expand { - writeByte(pp, '\t') - } - showTypeInStruct := t.Elem().Kind() == reflect.Interface - pp.printValue(mv, showTypeInStruct, true) - if expand { - io.WriteString(pp, ",\n") - } else if i < v.Len()-1 { - io.WriteString(pp, ", ") - } - } - if expand { - pp.tw.Flush() - } - } - writeByte(p, '}') - case reflect.Struct: - t := v.Type() - if v.CanAddr() { - addr := v.UnsafeAddr() - vis := visit{addr, t} - if vd, ok := p.visited[vis]; ok && vd < p.depth { - p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false) - break // don't print v again - } - p.visited[vis] = p.depth - } - - if showType { - io.WriteString(p, t.String()) - } - writeByte(p, '{') - if nonzero(v) { - expand := !canInline(v.Type()) - pp := p - if expand { - writeByte(p, '\n') - pp = p.indent() - } - for i := 0; i < v.NumField(); i++ 
{ - showTypeInStruct := true - if f := t.Field(i); f.Name != "" { - io.WriteString(pp, f.Name) - writeByte(pp, ':') - if expand { - writeByte(pp, '\t') - } - showTypeInStruct = labelType(f.Type) - } - pp.printValue(getField(v, i), showTypeInStruct, true) - if expand { - io.WriteString(pp, ",\n") - } else if i < v.NumField()-1 { - io.WriteString(pp, ", ") - } - } - if expand { - pp.tw.Flush() - } - } - writeByte(p, '}') - case reflect.Interface: - switch e := v.Elem(); { - case e.Kind() == reflect.Invalid: - io.WriteString(p, "nil") - case e.IsValid(): - pp := *p - pp.depth++ - pp.printValue(e, showType, true) - default: - io.WriteString(p, v.Type().String()) - io.WriteString(p, "(nil)") - } - case reflect.Array, reflect.Slice: - t := v.Type() - if showType { - io.WriteString(p, t.String()) - } - if v.Kind() == reflect.Slice && v.IsNil() && showType { - io.WriteString(p, "(nil)") - break - } - if v.Kind() == reflect.Slice && v.IsNil() { - io.WriteString(p, "nil") - break - } - writeByte(p, '{') - expand := !canInline(v.Type()) - pp := p - if expand { - writeByte(p, '\n') - pp = p.indent() - } - for i := 0; i < v.Len(); i++ { - showTypeInSlice := t.Elem().Kind() == reflect.Interface - pp.printValue(v.Index(i), showTypeInSlice, true) - if expand { - io.WriteString(pp, ",\n") - } else if i < v.Len()-1 { - io.WriteString(pp, ", ") - } - } - if expand { - pp.tw.Flush() - } - writeByte(p, '}') - case reflect.Ptr: - e := v.Elem() - if !e.IsValid() { - writeByte(p, '(') - io.WriteString(p, v.Type().String()) - io.WriteString(p, ")(nil)") - } else { - pp := *p - pp.depth++ - writeByte(pp, '&') - pp.printValue(e, true, true) - } - case reflect.Chan: - x := v.Pointer() - if showType { - writeByte(p, '(') - io.WriteString(p, v.Type().String()) - fmt.Fprintf(p, ")(%#v)", x) - } else { - fmt.Fprintf(p, "%#v", x) - } - case reflect.Func: - io.WriteString(p, v.Type().String()) - io.WriteString(p, " {...}") - case reflect.UnsafePointer: - p.printInline(v, v.Pointer(), showType) - case reflect.Invalid: - io.WriteString(p, "nil") - } -} - -func canInline(t reflect.Type) bool { - switch t.Kind() { - case reflect.Map: - return !canExpand(t.Elem()) - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - if canExpand(t.Field(i).Type) { - return false - } - } - return true - case reflect.Interface: - return false - case reflect.Array, reflect.Slice: - return !canExpand(t.Elem()) - case reflect.Ptr: - return false - case reflect.Chan, reflect.Func, reflect.UnsafePointer: - return false - } - return true -} - -func canExpand(t reflect.Type) bool { - switch t.Kind() { - case reflect.Map, reflect.Struct, - reflect.Interface, reflect.Array, reflect.Slice, - reflect.Ptr: - return true - } - return false -} - -func labelType(t reflect.Type) bool { - switch t.Kind() { - case reflect.Interface, reflect.Struct: - return true - } - return false -} - -func (p *printer) fmtString(s string, quote bool) { - if quote { - s = strconv.Quote(s) - } - io.WriteString(p, s) -} - -func writeByte(w io.Writer, b byte) { - w.Write([]byte{b}) -} - -func getField(v reflect.Value, i int) reflect.Value { - val := v.Field(i) - if val.Kind() == reflect.Interface && !val.IsNil() { - val = val.Elem() - } - return val -} diff --git a/vendor/github.com/kr/pretty/pretty.go b/vendor/github.com/kr/pretty/pretty.go deleted file mode 100644 index b4ca583c02..0000000000 --- a/vendor/github.com/kr/pretty/pretty.go +++ /dev/null @@ -1,108 +0,0 @@ -// Package pretty provides pretty-printing for Go values. 
This is -// useful during debugging, to avoid wrapping long output lines in -// the terminal. -// -// It provides a function, Formatter, that can be used with any -// function that accepts a format string. It also provides -// convenience wrappers for functions in packages fmt and log. -package pretty - -import ( - "fmt" - "io" - "log" - "reflect" -) - -// Errorf is a convenience wrapper for fmt.Errorf. -// -// Calling Errorf(f, x, y) is equivalent to -// fmt.Errorf(f, Formatter(x), Formatter(y)). -func Errorf(format string, a ...interface{}) error { - return fmt.Errorf(format, wrap(a, false)...) -} - -// Fprintf is a convenience wrapper for fmt.Fprintf. -// -// Calling Fprintf(w, f, x, y) is equivalent to -// fmt.Fprintf(w, f, Formatter(x), Formatter(y)). -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) { - return fmt.Fprintf(w, format, wrap(a, false)...) -} - -// Log is a convenience wrapper for log.Printf. -// -// Calling Log(x, y) is equivalent to -// log.Print(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Log(a ...interface{}) { - log.Print(wrap(a, true)...) -} - -// Logf is a convenience wrapper for log.Printf. -// -// Calling Logf(f, x, y) is equivalent to -// log.Printf(f, Formatter(x), Formatter(y)). -func Logf(format string, a ...interface{}) { - log.Printf(format, wrap(a, false)...) -} - -// Logln is a convenience wrapper for log.Printf. -// -// Calling Logln(x, y) is equivalent to -// log.Println(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Logln(a ...interface{}) { - log.Println(wrap(a, true)...) -} - -// Print pretty-prints its operands and writes to standard output. -// -// Calling Print(x, y) is equivalent to -// fmt.Print(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Print(a ...interface{}) (n int, errno error) { - return fmt.Print(wrap(a, true)...) -} - -// Printf is a convenience wrapper for fmt.Printf. -// -// Calling Printf(f, x, y) is equivalent to -// fmt.Printf(f, Formatter(x), Formatter(y)). -func Printf(format string, a ...interface{}) (n int, errno error) { - return fmt.Printf(format, wrap(a, false)...) -} - -// Println pretty-prints its operands and writes to standard output. -// -// Calling Println(x, y) is equivalent to -// fmt.Println(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Println(a ...interface{}) (n int, errno error) { - return fmt.Println(wrap(a, true)...) -} - -// Sprint is a convenience wrapper for fmt.Sprintf. -// -// Calling Sprint(x, y) is equivalent to -// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is -// formatted with "%# v". -func Sprint(a ...interface{}) string { - return fmt.Sprint(wrap(a, true)...) -} - -// Sprintf is a convenience wrapper for fmt.Sprintf. -// -// Calling Sprintf(f, x, y) is equivalent to -// fmt.Sprintf(f, Formatter(x), Formatter(y)). -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, wrap(a, false)...) 
-} - -func wrap(a []interface{}, force bool) []interface{} { - w := make([]interface{}, len(a)) - for i, x := range a { - w[i] = formatter{v: reflect.ValueOf(x), force: force} - } - return w -} diff --git a/vendor/github.com/kr/pretty/zero.go b/vendor/github.com/kr/pretty/zero.go deleted file mode 100644 index abb5b6fc14..0000000000 --- a/vendor/github.com/kr/pretty/zero.go +++ /dev/null @@ -1,41 +0,0 @@ -package pretty - -import ( - "reflect" -) - -func nonzero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() != 0 - case reflect.Float32, reflect.Float64: - return v.Float() != 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() != complex(0, 0) - case reflect.String: - return v.String() != "" - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if nonzero(getField(v, i)) { - return true - } - } - return false - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if nonzero(v.Index(i)) { - return true - } - } - return false - case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func: - return !v.IsNil() - case reflect.UnsafePointer: - return v.Pointer() != 0 - } - return true -} diff --git a/vendor/github.com/kr/text/License b/vendor/github.com/kr/text/License deleted file mode 100644 index 480a328059..0000000000 --- a/vendor/github.com/kr/text/License +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2012 Keith Rarick - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/kr/text/Readme b/vendor/github.com/kr/text/Readme deleted file mode 100644 index 7e6e7c0687..0000000000 --- a/vendor/github.com/kr/text/Readme +++ /dev/null @@ -1,3 +0,0 @@ -This is a Go package for manipulating paragraphs of text. - -See http://go.pkgdoc.org/github.com/kr/text for full documentation. diff --git a/vendor/github.com/kr/text/doc.go b/vendor/github.com/kr/text/doc.go deleted file mode 100644 index cf4c198f95..0000000000 --- a/vendor/github.com/kr/text/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package text provides rudimentary functions for manipulating text in -// paragraphs. 
-package text diff --git a/vendor/github.com/kr/text/indent.go b/vendor/github.com/kr/text/indent.go deleted file mode 100644 index 4ebac45c09..0000000000 --- a/vendor/github.com/kr/text/indent.go +++ /dev/null @@ -1,74 +0,0 @@ -package text - -import ( - "io" -) - -// Indent inserts prefix at the beginning of each non-empty line of s. The -// end-of-line marker is NL. -func Indent(s, prefix string) string { - return string(IndentBytes([]byte(s), []byte(prefix))) -} - -// IndentBytes inserts prefix at the beginning of each non-empty line of b. -// The end-of-line marker is NL. -func IndentBytes(b, prefix []byte) []byte { - var res []byte - bol := true - for _, c := range b { - if bol && c != '\n' { - res = append(res, prefix...) - } - res = append(res, c) - bol = c == '\n' - } - return res -} - -// Writer indents each line of its input. -type indentWriter struct { - w io.Writer - bol bool - pre [][]byte - sel int - off int -} - -// NewIndentWriter makes a new write filter that indents the input -// lines. Each line is prefixed in order with the corresponding -// element of pre. If there are more lines than elements, the last -// element of pre is repeated for each subsequent line. -func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer { - return &indentWriter{ - w: w, - pre: pre, - bol: true, - } -} - -// The only errors returned are from the underlying indentWriter. -func (w *indentWriter) Write(p []byte) (n int, err error) { - for _, c := range p { - if w.bol { - var i int - i, err = w.w.Write(w.pre[w.sel][w.off:]) - w.off += i - if err != nil { - return n, err - } - } - _, err = w.w.Write([]byte{c}) - if err != nil { - return n, err - } - n++ - w.bol = c == '\n' - if w.bol { - w.off = 0 - if w.sel < len(w.pre)-1 { - w.sel++ - } - } - } - return n, nil -} diff --git a/vendor/github.com/kr/text/wrap.go b/vendor/github.com/kr/text/wrap.go deleted file mode 100644 index b09bb03736..0000000000 --- a/vendor/github.com/kr/text/wrap.go +++ /dev/null @@ -1,86 +0,0 @@ -package text - -import ( - "bytes" - "math" -) - -var ( - nl = []byte{'\n'} - sp = []byte{' '} -) - -const defaultPenalty = 1e5 - -// Wrap wraps s into a paragraph of lines of length lim, with minimal -// raggedness. -func Wrap(s string, lim int) string { - return string(WrapBytes([]byte(s), lim)) -} - -// WrapBytes wraps b into a paragraph of lines of length lim, with minimal -// raggedness. -func WrapBytes(b []byte, lim int) []byte { - words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp) - var lines [][]byte - for _, line := range WrapWords(words, 1, lim, defaultPenalty) { - lines = append(lines, bytes.Join(line, sp)) - } - return bytes.Join(lines, nl) -} - -// WrapWords is the low-level line-breaking algorithm, useful if you need more -// control over the details of the text wrapping process. For most uses, either -// Wrap or WrapBytes will be sufficient and more convenient. -// -// WrapWords splits a list of words into lines with minimal "raggedness", -// treating each byte as one unit, accounting for spc units between adjacent -// words on each line, and attempting to limit lines to lim units. Raggedness -// is the total error over all lines, where error is the square of the -// difference of the length of the line and lim. Too-long lines (which only -// happen when a single word is longer than lim units) have pen penalty units -// added to the error. 
-func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte { - n := len(words) - - length := make([][]int, n) - for i := 0; i < n; i++ { - length[i] = make([]int, n) - length[i][i] = len(words[i]) - for j := i + 1; j < n; j++ { - length[i][j] = length[i][j-1] + spc + len(words[j]) - } - } - - nbrk := make([]int, n) - cost := make([]int, n) - for i := range cost { - cost[i] = math.MaxInt32 - } - for i := n - 1; i >= 0; i-- { - if length[i][n-1] <= lim || i == n-1 { - cost[i] = 0 - nbrk[i] = n - } else { - for j := i + 1; j < n; j++ { - d := lim - length[i][j-1] - c := d*d + cost[j] - if length[i][j-1] > lim { - c += pen // too-long lines get a worse penalty - } - if c < cost[i] { - cost[i] = c - nbrk[i] = j - } - } - } - } - - var lines [][][]byte - i := 0 - for i < n { - lines = append(lines, words[i:nbrk[i]]) - i = nbrk[i] - } - return lines -} diff --git a/vendor/github.com/rogpeppe/go-internal/LICENSE b/vendor/github.com/rogpeppe/go-internal/LICENSE deleted file mode 100644 index 49ea0f9288..0000000000 --- a/vendor/github.com/rogpeppe/go-internal/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2018 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go b/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go deleted file mode 100644 index af6124f107..0000000000 --- a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build go1.12 - -package fmtsort - -import "reflect" - -const brokenNaNs = false - -func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) { - // Note: this code is arranged to not panic even in the presence - // of a concurrent map update. The runtime is responsible for - // yelling loudly if that happens. See issue 33275. 
- n := mapValue.Len() - key := make([]reflect.Value, 0, n) - value := make([]reflect.Value, 0, n) - iter := mapValue.MapRange() - for iter.Next() { - key = append(key, iter.Key()) - value = append(value, iter.Value()) - } - return key, value -} diff --git a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go b/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go deleted file mode 100644 index 6c2b022202..0000000000 --- a/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !go1.12 - -package fmtsort - -import "reflect" - -const brokenNaNs = true - -func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) { - key := mapValue.MapKeys() - value := make([]reflect.Value, 0, len(key)) - for _, k := range key { - v := mapValue.MapIndex(k) - if !v.IsValid() { - // Note: we can't retrieve the value, probably because - // the key is NaN, so just do the best we can and - // add a zero value of the correct type in that case. - v = reflect.Zero(mapValue.Type().Elem()) - } - value = append(value, v) - } - return key, value -} diff --git a/vendor/github.com/rogpeppe/go-internal/fmtsort/sort.go b/vendor/github.com/rogpeppe/go-internal/fmtsort/sort.go deleted file mode 100644 index 0fb5187dd8..0000000000 --- a/vendor/github.com/rogpeppe/go-internal/fmtsort/sort.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fmtsort provides a general stable ordering mechanism -// for maps, on behalf of the fmt and text/template packages. -// It is not guaranteed to be efficient and works only for types -// that are valid map keys. -package fmtsort - -import ( - "reflect" - "sort" -) - -// Note: Throughout this package we avoid calling reflect.Value.Interface as -// it is not always legal to do so and it's easier to avoid the issue than to face it. - -// SortedMap represents a map's keys and values. The keys and values are -// aligned in index order: Value[i] is the value in the map corresponding to Key[i]. -type SortedMap struct { - Key []reflect.Value - Value []reflect.Value -} - -func (o *SortedMap) Len() int { return len(o.Key) } -func (o *SortedMap) Less(i, j int) bool { return compare(o.Key[i], o.Key[j]) < 0 } -func (o *SortedMap) Swap(i, j int) { - o.Key[i], o.Key[j] = o.Key[j], o.Key[i] - o.Value[i], o.Value[j] = o.Value[j], o.Value[i] -} - -// Sort accepts a map and returns a SortedMap that has the same keys and -// values but in a stable sorted order according to the keys, modulo issues -// raised by unorderable key values such as NaNs. -// -// The ordering rules are more general than with Go's < operator: -// -// - when applicable, nil compares low -// - ints, floats, and strings order by < -// - NaN compares less than non-NaN floats -// - bool compares false before true -// - complex compares real, then imag -// - pointers compare by machine address -// - channel values compare by machine address -// - structs compare each field in turn -// - arrays compare each element in turn. -// Otherwise identical arrays compare by length. -// - interface values compare first by reflect.Type describing the concrete type -// and then by concrete value as described in the previous rules. 
-// -func Sort(mapValue reflect.Value) *SortedMap { - if mapValue.Type().Kind() != reflect.Map { - return nil - } - key, value := mapElems(mapValue) - sorted := &SortedMap{ - Key: key, - Value: value, - } - sort.Stable(sorted) - return sorted -} - -// compare compares two values of the same type. It returns -1, 0, 1 -// according to whether a > b (1), a == b (0), or a < b (-1). -// If the types differ, it returns -1. -// See the comment on Sort for the comparison rules. -func compare(aVal, bVal reflect.Value) int { - aType, bType := aVal.Type(), bVal.Type() - if aType != bType { - return -1 // No good answer possible, but don't return 0: they're not equal. - } - switch aVal.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - a, b := aVal.Int(), bVal.Int() - switch { - case a < b: - return -1 - case a > b: - return 1 - default: - return 0 - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - a, b := aVal.Uint(), bVal.Uint() - switch { - case a < b: - return -1 - case a > b: - return 1 - default: - return 0 - } - case reflect.String: - a, b := aVal.String(), bVal.String() - switch { - case a < b: - return -1 - case a > b: - return 1 - default: - return 0 - } - case reflect.Float32, reflect.Float64: - return floatCompare(aVal.Float(), bVal.Float()) - case reflect.Complex64, reflect.Complex128: - a, b := aVal.Complex(), bVal.Complex() - if c := floatCompare(real(a), real(b)); c != 0 { - return c - } - return floatCompare(imag(a), imag(b)) - case reflect.Bool: - a, b := aVal.Bool(), bVal.Bool() - switch { - case a == b: - return 0 - case a: - return 1 - default: - return -1 - } - case reflect.Ptr: - a, b := aVal.Pointer(), bVal.Pointer() - switch { - case a < b: - return -1 - case a > b: - return 1 - default: - return 0 - } - case reflect.Chan: - if c, ok := nilCompare(aVal, bVal); ok { - return c - } - ap, bp := aVal.Pointer(), bVal.Pointer() - switch { - case ap < bp: - return -1 - case ap > bp: - return 1 - default: - return 0 - } - case reflect.Struct: - for i := 0; i < aVal.NumField(); i++ { - if c := compare(aVal.Field(i), bVal.Field(i)); c != 0 { - return c - } - } - return 0 - case reflect.Array: - for i := 0; i < aVal.Len(); i++ { - if c := compare(aVal.Index(i), bVal.Index(i)); c != 0 { - return c - } - } - return 0 - case reflect.Interface: - if c, ok := nilCompare(aVal, bVal); ok { - return c - } - c := compare(reflect.ValueOf(aVal.Elem().Type()), reflect.ValueOf(bVal.Elem().Type())) - if c != 0 { - return c - } - return compare(aVal.Elem(), bVal.Elem()) - default: - // Certain types cannot appear as keys (maps, funcs, slices), but be explicit. - panic("bad type in compare: " + aType.String()) - } -} - -// nilCompare checks whether either value is nil. If not, the boolean is false. -// If either value is nil, the boolean is true and the integer is the comparison -// value. The comparison is defined to be 0 if both are nil, otherwise the one -// nil value compares low. Both arguments must represent a chan, func, -// interface, map, pointer, or slice. -func nilCompare(aVal, bVal reflect.Value) (int, bool) { - if aVal.IsNil() { - if bVal.IsNil() { - return 0, true - } - return -1, true - } - if bVal.IsNil() { - return 1, true - } - return 0, false -} - -// floatCompare compares two floating-point values. NaNs compare low. -func floatCompare(a, b float64) int { - switch { - case isNaN(a): - return -1 // No good answer if b is a NaN so don't bother checking. 
- case isNaN(b): - return 1 - case a < b: - return -1 - case a > b: - return 1 - } - return 0 -} - -func isNaN(a float64) bool { - return a != a -} diff --git a/vendor/github.com/stretchr/testify/suite/doc.go b/vendor/github.com/stretchr/testify/suite/doc.go new file mode 100644 index 0000000000..f91a245d3f --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/doc.go @@ -0,0 +1,65 @@ +// Package suite contains logic for creating testing suite structs +// and running the methods on those structs as tests. The most useful +// piece of this package is that you can create setup/teardown methods +// on your testing suites, which will run before/after the whole suite +// or individual tests (depending on which interface(s) you +// implement). +// +// A testing suite is usually built by first extending the built-in +// suite functionality from suite.Suite in testify. Alternatively, +// you could reproduce that logic on your own if you wanted (you +// just need to implement the TestingSuite interface from +// suite/interfaces.go). +// +// After that, you can implement any of the interfaces in +// suite/interfaces.go to add setup/teardown functionality to your +// suite, and add any methods that start with "Test" to add tests. +// Methods that do not match any suite interfaces and do not begin +// with "Test" will not be run by testify, and can safely be used as +// helper methods. +// +// Once you've built your testing suite, you need to run the suite +// (using suite.Run from testify) inside any function that matches the +// identity that "go test" is already looking for (i.e. +// func(*testing.T)). +// +// Regular expression to select test suites specified command-line +// argument "-run". Regular expression to select the methods +// of test suites specified command-line argument "-m". +// Suite object has assertion methods. +// +// A crude example: +// // Basic imports +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// ) +// +// // Define the suite, and absorb the built-in basic suite +// // functionality from testify - including a T() method which +// // returns the current testing context +// type ExampleTestSuite struct { +// suite.Suite +// VariableThatShouldStartAtFive int +// } +// +// // Make sure that VariableThatShouldStartAtFive is set to five +// // before each test +// func (suite *ExampleTestSuite) SetupTest() { +// suite.VariableThatShouldStartAtFive = 5 +// } +// +// // All methods that begin with "Test" are run as tests within a +// // suite. +// func (suite *ExampleTestSuite) TestExample() { +// assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive) +// suite.Equal(5, suite.VariableThatShouldStartAtFive) +// } +// +// // In order for 'go test' to run this suite, we need to create +// // a normal test function and pass our suite to suite.Run +// func TestExampleTestSuite(t *testing.T) { +// suite.Run(t, new(ExampleTestSuite)) +// } +package suite diff --git a/vendor/github.com/stretchr/testify/suite/interfaces.go b/vendor/github.com/stretchr/testify/suite/interfaces.go new file mode 100644 index 0000000000..8b98a8af27 --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/interfaces.go @@ -0,0 +1,53 @@ +package suite + +import "testing" + +// TestingSuite can store and return the current *testing.T context +// generated by 'go test'. 
+type TestingSuite interface { + T() *testing.T + SetT(*testing.T) +} + +// SetupAllSuite has a SetupSuite method, which will run before the +// tests in the suite are run. +type SetupAllSuite interface { + SetupSuite() +} + +// SetupTestSuite has a SetupTest method, which will run before each +// test in the suite. +type SetupTestSuite interface { + SetupTest() +} + +// TearDownAllSuite has a TearDownSuite method, which will run after +// all the tests in the suite have been run. +type TearDownAllSuite interface { + TearDownSuite() +} + +// TearDownTestSuite has a TearDownTest method, which will run after +// each test in the suite. +type TearDownTestSuite interface { + TearDownTest() +} + +// BeforeTest has a function to be executed right before the test +// starts and receives the suite and test names as input +type BeforeTest interface { + BeforeTest(suiteName, testName string) +} + +// AfterTest has a function to be executed right after the test +// finishes and receives the suite and test names as input +type AfterTest interface { + AfterTest(suiteName, testName string) +} + +// WithStats implements HandleStats, a function that will be executed +// when a test suite is finished. The stats contain information about +// the execution of that suite and its tests. +type WithStats interface { + HandleStats(suiteName string, stats *SuiteInformation) +} diff --git a/vendor/github.com/stretchr/testify/suite/stats.go b/vendor/github.com/stretchr/testify/suite/stats.go new file mode 100644 index 0000000000..261da37f78 --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/stats.go @@ -0,0 +1,46 @@ +package suite + +import "time" + +// SuiteInformation stats stores stats for the whole suite execution. +type SuiteInformation struct { + Start, End time.Time + TestStats map[string]*TestInformation +} + +// TestInformation stores information about the execution of each test. +type TestInformation struct { + TestName string + Start, End time.Time + Passed bool +} + +func newSuiteInformation() *SuiteInformation { + testStats := make(map[string]*TestInformation) + + return &SuiteInformation{ + TestStats: testStats, + } +} + +func (s SuiteInformation) start(testName string) { + s.TestStats[testName] = &TestInformation{ + TestName: testName, + Start: time.Now(), + } +} + +func (s SuiteInformation) end(testName string, passed bool) { + s.TestStats[testName].End = time.Now() + s.TestStats[testName].Passed = passed +} + +func (s SuiteInformation) Passed() bool { + for _, stats := range s.TestStats { + if !stats.Passed { + return false + } + } + + return true +} diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go new file mode 100644 index 0000000000..895591878b --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/suite.go @@ -0,0 +1,226 @@ +package suite + +import ( + "flag" + "fmt" + "os" + "reflect" + "regexp" + "runtime/debug" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var allTestsFilter = func(_, _ string) (bool, error) { return true, nil } +var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run") + +// Suite is a basic testing suite with methods for storing and +// retrieving the current *testing.T context. +type Suite struct { + *assert.Assertions + mu sync.RWMutex + require *require.Assertions + t *testing.T +} + +// T retrieves the current *testing.T context. 
+func (suite *Suite) T() *testing.T { + suite.mu.RLock() + defer suite.mu.RUnlock() + return suite.t +} + +// SetT sets the current *testing.T context. +func (suite *Suite) SetT(t *testing.T) { + suite.mu.Lock() + defer suite.mu.Unlock() + suite.t = t + suite.Assertions = assert.New(t) + suite.require = require.New(t) +} + +// Require returns a require context for suite. +func (suite *Suite) Require() *require.Assertions { + suite.mu.Lock() + defer suite.mu.Unlock() + if suite.require == nil { + suite.require = require.New(suite.T()) + } + return suite.require +} + +// Assert returns an assert context for suite. Normally, you can call +// `suite.NoError(expected, actual)`, but for situations where the embedded +// methods are overridden (for example, you might want to override +// assert.Assertions with require.Assertions), this method is provided so you +// can call `suite.Assert().NoError()`. +func (suite *Suite) Assert() *assert.Assertions { + suite.mu.Lock() + defer suite.mu.Unlock() + if suite.Assertions == nil { + suite.Assertions = assert.New(suite.T()) + } + return suite.Assertions +} + +func recoverAndFailOnPanic(t *testing.T) { + r := recover() + failOnPanic(t, r) +} + +func failOnPanic(t *testing.T, r interface{}) { + if r != nil { + t.Errorf("test panicked: %v\n%s", r, debug.Stack()) + t.FailNow() + } +} + +// Run provides suite functionality around golang subtests. It should be +// called in place of t.Run(name, func(t *testing.T)) in test suite code. +// The passed-in func will be executed as a subtest with a fresh instance of t. +// Provides compatibility with go test pkg -run TestSuite/TestName/SubTestName. +func (suite *Suite) Run(name string, subtest func()) bool { + oldT := suite.T() + defer suite.SetT(oldT) + return oldT.Run(name, func(t *testing.T) { + suite.SetT(t) + subtest() + }) +} + +// Run takes a testing suite and runs all of the tests attached +// to it. 
+func Run(t *testing.T, suite TestingSuite) { + defer recoverAndFailOnPanic(t) + + suite.SetT(t) + + var suiteSetupDone bool + + var stats *SuiteInformation + if _, ok := suite.(WithStats); ok { + stats = newSuiteInformation() + } + + tests := []testing.InternalTest{} + methodFinder := reflect.TypeOf(suite) + suiteName := methodFinder.Elem().Name() + + for i := 0; i < methodFinder.NumMethod(); i++ { + method := methodFinder.Method(i) + + ok, err := methodFilter(method.Name) + if err != nil { + fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err) + os.Exit(1) + } + + if !ok { + continue + } + + if !suiteSetupDone { + if stats != nil { + stats.Start = time.Now() + } + + if setupAllSuite, ok := suite.(SetupAllSuite); ok { + setupAllSuite.SetupSuite() + } + + suiteSetupDone = true + } + + test := testing.InternalTest{ + Name: method.Name, + F: func(t *testing.T) { + parentT := suite.T() + suite.SetT(t) + defer recoverAndFailOnPanic(t) + defer func() { + r := recover() + + if stats != nil { + passed := !t.Failed() && r == nil + stats.end(method.Name, passed) + } + + if afterTestSuite, ok := suite.(AfterTest); ok { + afterTestSuite.AfterTest(suiteName, method.Name) + } + + if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok { + tearDownTestSuite.TearDownTest() + } + + suite.SetT(parentT) + failOnPanic(t, r) + }() + + if setupTestSuite, ok := suite.(SetupTestSuite); ok { + setupTestSuite.SetupTest() + } + if beforeTestSuite, ok := suite.(BeforeTest); ok { + beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name) + } + + if stats != nil { + stats.start(method.Name) + } + + method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) + }, + } + tests = append(tests, test) + } + if suiteSetupDone { + defer func() { + if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { + tearDownAllSuite.TearDownSuite() + } + + if suiteWithStats, measureStats := suite.(WithStats); measureStats { + stats.End = time.Now() + suiteWithStats.HandleStats(suiteName, stats) + } + }() + } + + runTests(t, tests) +} + +// Filtering method according to set regular expression +// specified command-line argument -m +func methodFilter(name string) (bool, error) { + if ok, _ := regexp.MatchString("^Test", name); !ok { + return false, nil + } + return regexp.MatchString(*matchMethod, name) +} + +func runTests(t testing.TB, tests []testing.InternalTest) { + if len(tests) == 0 { + t.Log("warning: no tests to run") + return + } + + r, ok := t.(runner) + if !ok { // backwards compatibility with Go 1.6 and below + if !testing.RunTests(allTestsFilter, tests) { + t.Fail() + } + return + } + + for _, test := range tests { + r.Run(test.Name, test.F) + } +} + +type runner interface { + Run(name string, f func(t *testing.T)) bool +} diff --git a/vendor/gopkg.in/check.v1/.gitignore b/vendor/gopkg.in/check.v1/.gitignore deleted file mode 100644 index 191a5360b7..0000000000 --- a/vendor/gopkg.in/check.v1/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -_* -*.swp -*.[568] -[568].out diff --git a/vendor/gopkg.in/check.v1/.travis.yml b/vendor/gopkg.in/check.v1/.travis.yml deleted file mode 100644 index ead6735fca..0000000000 --- a/vendor/gopkg.in/check.v1/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go - -go_import_path: gopkg.in/check.v1 diff --git a/vendor/gopkg.in/check.v1/LICENSE b/vendor/gopkg.in/check.v1/LICENSE deleted file mode 100644 index 545cf2d331..0000000000 --- a/vendor/gopkg.in/check.v1/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Gocheck - A rich testing framework for Go - -Copyright (c) 2010-2013 
Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/check.v1/README.md b/vendor/gopkg.in/check.v1/README.md deleted file mode 100644 index 0ca9e57260..0000000000 --- a/vendor/gopkg.in/check.v1/README.md +++ /dev/null @@ -1,20 +0,0 @@ -Instructions -============ - -Install the package with: - - go get gopkg.in/check.v1 - -Import it with: - - import "gopkg.in/check.v1" - -and use _check_ as the package name inside the code. - -For more details, visit the project page: - -* http://labix.org/gocheck - -and the API documentation: - -* https://gopkg.in/check.v1 diff --git a/vendor/gopkg.in/check.v1/TODO b/vendor/gopkg.in/check.v1/TODO deleted file mode 100644 index 33498270ea..0000000000 --- a/vendor/gopkg.in/check.v1/TODO +++ /dev/null @@ -1,2 +0,0 @@ -- Assert(slice, Contains, item) -- Parallel test support diff --git a/vendor/gopkg.in/check.v1/benchmark.go b/vendor/gopkg.in/check.v1/benchmark.go deleted file mode 100644 index 46ea9dc6da..0000000000 --- a/vendor/gopkg.in/check.v1/benchmark.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2012 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package check - -import ( - "fmt" - "runtime" - "time" -) - -var memStats runtime.MemStats - -// testingB is a type passed to Benchmark functions to manage benchmark -// timing and to specify the number of iterations to run. -type timer struct { - start time.Time // Time test or benchmark started - duration time.Duration - N int - bytes int64 - timerOn bool - benchTime time.Duration - // The initial states of memStats.Mallocs and memStats.TotalAlloc. - startAllocs uint64 - startBytes uint64 - // The net total of this test after being run. - netAllocs uint64 - netBytes uint64 -} - -// StartTimer starts timing a test. This function is called automatically -// before a benchmark starts, but it can also used to resume timing after -// a call to StopTimer. -func (c *C) StartTimer() { - if !c.timerOn { - c.start = time.Now() - c.timerOn = true - - runtime.ReadMemStats(&memStats) - c.startAllocs = memStats.Mallocs - c.startBytes = memStats.TotalAlloc - } -} - -// StopTimer stops timing a test. This can be used to pause the timer -// while performing complex initialization that you don't -// want to measure. -func (c *C) StopTimer() { - if c.timerOn { - c.duration += time.Now().Sub(c.start) - c.timerOn = false - runtime.ReadMemStats(&memStats) - c.netAllocs += memStats.Mallocs - c.startAllocs - c.netBytes += memStats.TotalAlloc - c.startBytes - } -} - -// ResetTimer sets the elapsed benchmark time to zero. -// It does not affect whether the timer is running. -func (c *C) ResetTimer() { - if c.timerOn { - c.start = time.Now() - runtime.ReadMemStats(&memStats) - c.startAllocs = memStats.Mallocs - c.startBytes = memStats.TotalAlloc - } - c.duration = 0 - c.netAllocs = 0 - c.netBytes = 0 -} - -// SetBytes informs the number of bytes that the benchmark processes -// on each iteration. If this is called in a benchmark it will also -// report MB/s. -func (c *C) SetBytes(n int64) { - c.bytes = n -} - -func (c *C) nsPerOp() int64 { - if c.N <= 0 { - return 0 - } - return c.duration.Nanoseconds() / int64(c.N) -} - -func (c *C) mbPerSec() float64 { - if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 { - return 0 - } - return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds() -} - -func (c *C) timerString() string { - if c.N <= 0 { - return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9) - } - mbs := c.mbPerSec() - mb := "" - if mbs != 0 { - mb = fmt.Sprintf("\t%7.2f MB/s", mbs) - } - nsop := c.nsPerOp() - ns := fmt.Sprintf("%10d ns/op", nsop) - if c.N > 0 && nsop < 100 { - // The format specifiers here make sure that - // the ones digits line up for all three possible formats. 
- if nsop < 10 { - ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) - } else { - ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) - } - } - memStats := "" - if c.benchMem { - allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N)) - allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N)) - memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs) - } - return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats) -} - -func min(x, y int) int { - if x > y { - return y - } - return x -} - -func max(x, y int) int { - if x < y { - return y - } - return x -} - -// roundDown10 rounds a number down to the nearest power of 10. -func roundDown10(n int) int { - var tens = 0 - // tens = floor(log_10(n)) - for n > 10 { - n = n / 10 - tens++ - } - // result = 10^tens - result := 1 - for i := 0; i < tens; i++ { - result *= 10 - } - return result -} - -// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX]. -func roundUp(n int) int { - base := roundDown10(n) - if n < (2 * base) { - return 2 * base - } - if n < (5 * base) { - return 5 * base - } - return 10 * base -} diff --git a/vendor/gopkg.in/check.v1/check.go b/vendor/gopkg.in/check.v1/check.go deleted file mode 100644 index bba8d8bf6e..0000000000 --- a/vendor/gopkg.in/check.v1/check.go +++ /dev/null @@ -1,876 +0,0 @@ -// Package check is a rich testing extension for Go's testing package. -// -// For details about the project, see: -// -// http://labix.org/gocheck -// -package check - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "reflect" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// ----------------------------------------------------------------------- -// Internal type which deals with suite method calling. - -const ( - fixtureKd = iota - testKd -) - -type funcKind int - -const ( - succeededSt = iota - failedSt - skippedSt - panickedSt - fixturePanickedSt - missedSt -) - -type funcStatus uint32 - -// A method value can't reach its own Method structure. -type methodType struct { - reflect.Value - Info reflect.Method -} - -func newMethod(receiver reflect.Value, i int) *methodType { - return &methodType{receiver.Method(i), receiver.Type().Method(i)} -} - -func (method *methodType) PC() uintptr { - return method.Info.Func.Pointer() -} - -func (method *methodType) suiteName() string { - t := method.Info.Type.In(0) - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t.Name() -} - -func (method *methodType) String() string { - return method.suiteName() + "." 
+ method.Info.Name -} - -func (method *methodType) matches(re *regexp.Regexp) bool { - return (re.MatchString(method.Info.Name) || - re.MatchString(method.suiteName()) || - re.MatchString(method.String())) -} - -type C struct { - method *methodType - kind funcKind - testName string - _status funcStatus - logb *logger - logw io.Writer - done chan *C - reason string - mustFail bool - tempDir *tempDir - benchMem bool - startTime time.Time - timer -} - -func (c *C) status() funcStatus { - return funcStatus(atomic.LoadUint32((*uint32)(&c._status))) -} - -func (c *C) setStatus(s funcStatus) { - atomic.StoreUint32((*uint32)(&c._status), uint32(s)) -} - -func (c *C) stopNow() { - runtime.Goexit() -} - -// logger is a concurrency safe byte.Buffer -type logger struct { - sync.Mutex - writer bytes.Buffer -} - -func (l *logger) Write(buf []byte) (int, error) { - l.Lock() - defer l.Unlock() - return l.writer.Write(buf) -} - -func (l *logger) WriteTo(w io.Writer) (int64, error) { - l.Lock() - defer l.Unlock() - return l.writer.WriteTo(w) -} - -func (l *logger) String() string { - l.Lock() - defer l.Unlock() - return l.writer.String() -} - -// ----------------------------------------------------------------------- -// Handling of temporary files and directories. - -type tempDir struct { - sync.Mutex - path string - counter int -} - -func (td *tempDir) newPath() string { - td.Lock() - defer td.Unlock() - if td.path == "" { - path, err := ioutil.TempDir("", "check-") - if err != nil { - panic("Couldn't create temporary directory: " + err.Error()) - } - td.path = path - } - result := filepath.Join(td.path, strconv.Itoa(td.counter)) - td.counter++ - return result -} - -func (td *tempDir) removeAll() { - td.Lock() - defer td.Unlock() - if td.path != "" { - err := os.RemoveAll(td.path) - if err != nil { - fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error()) - } - } -} - -// Create a new temporary directory which is automatically removed after -// the suite finishes running. -func (c *C) MkDir() string { - path := c.tempDir.newPath() - if err := os.Mkdir(path, 0700); err != nil { - panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error())) - } - return path -} - -// ----------------------------------------------------------------------- -// Low-level logging functions. - -func (c *C) log(args ...interface{}) { - c.writeLog([]byte(fmt.Sprint(args...) + "\n")) -} - -func (c *C) logf(format string, args ...interface{}) { - c.writeLog([]byte(fmt.Sprintf(format+"\n", args...))) -} - -func (c *C) logNewLine() { - c.writeLog([]byte{'\n'}) -} - -func (c *C) writeLog(buf []byte) { - c.logb.Write(buf) - if c.logw != nil { - c.logw.Write(buf) - } -} - -func hasStringOrError(x interface{}) (ok bool) { - _, ok = x.(fmt.Stringer) - if ok { - return - } - _, ok = x.(error) - return -} - -func (c *C) logValue(label string, value interface{}) { - if label == "" { - if hasStringOrError(value) { - c.logf("... %#v (%q)", value, value) - } else { - c.logf("... %#v", value) - } - } else if value == nil { - c.logf("... %s = nil", label) - } else { - if hasStringOrError(value) { - fv := fmt.Sprintf("%#v", value) - qv := fmt.Sprintf("%q", value) - if fv != qv { - c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv) - return - } - } - if s, ok := value.(string); ok && isMultiLine(s) { - c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value)) - c.logMultiLine(s) - } else { - c.logf("... 
%s %s = %#v", label, reflect.TypeOf(value), value) - } - } -} - -func formatMultiLine(s string, quote bool) []byte { - b := make([]byte, 0, len(s)*2) - i := 0 - n := len(s) - for i < n { - j := i + 1 - for j < n && s[j-1] != '\n' { - j++ - } - b = append(b, "... "...) - if quote { - b = strconv.AppendQuote(b, s[i:j]) - } else { - b = append(b, s[i:j]...) - b = bytes.TrimSpace(b) - } - if quote && j < n { - b = append(b, " +"...) - } - b = append(b, '\n') - i = j - } - return b -} - -func (c *C) logMultiLine(s string) { - c.writeLog(formatMultiLine(s, true)) -} - -func isMultiLine(s string) bool { - for i := 0; i+1 < len(s); i++ { - if s[i] == '\n' { - return true - } - } - return false -} - -func (c *C) logString(issue string) { - c.log("... ", issue) -} - -func (c *C) logCaller(skip int) { - // This is a bit heavier than it ought to be. - skip++ // Our own frame. - pc, callerFile, callerLine, ok := runtime.Caller(skip) - if !ok { - return - } - var testFile string - var testLine int - testFunc := runtime.FuncForPC(c.method.PC()) - if runtime.FuncForPC(pc) != testFunc { - for { - skip++ - if pc, file, line, ok := runtime.Caller(skip); ok { - // Note that the test line may be different on - // distinct calls for the same test. Showing - // the "internal" line is helpful when debugging. - if runtime.FuncForPC(pc) == testFunc { - testFile, testLine = file, line - break - } - } else { - break - } - } - } - if testFile != "" && (testFile != callerFile || testLine != callerLine) { - c.logCode(testFile, testLine) - } - c.logCode(callerFile, callerLine) -} - -func (c *C) logCode(path string, line int) { - c.logf("%s:%d:", nicePath(path), line) - code, err := printLine(path, line) - if code == "" { - code = "..." // XXX Open the file and take the raw line. - if err != nil { - code += err.Error() - } - } - c.log(indent(code, " ")) -} - -var valueGo = filepath.Join("reflect", "value.go") -var asmGo = filepath.Join("runtime", "asm_") - -func (c *C) logPanic(skip int, value interface{}) { - skip++ // Our own frame. - initialSkip := skip - for ; ; skip++ { - if pc, file, line, ok := runtime.Caller(skip); ok { - if skip == initialSkip { - c.logf("... Panic: %s (PC=0x%X)\n", value, pc) - } - name := niceFuncName(pc) - path := nicePath(file) - if strings.Contains(path, "/gopkg.in/check.v") { - continue - } - if name == "Value.call" && strings.HasSuffix(path, valueGo) { - continue - } - if (name == "call16" || name == "call32") && strings.Contains(path, asmGo) { - continue - } - c.logf("%s:%d\n in %s", nicePath(file), line, name) - } else { - break - } - } -} - -func (c *C) logSoftPanic(issue string) { - c.log("... Panic: ", issue) -} - -func (c *C) logArgPanic(method *methodType, expectedType string) { - c.logf("... Panic: %s argument should be %s", - niceFuncName(method.PC()), expectedType) -} - -// ----------------------------------------------------------------------- -// Some simple formatting helpers. 
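The tempDir bookkeeping and the C.MkDir helper deleted a little earlier in this file have a direct standard-library counterpart: since Go 1.15, testing.T.TempDir creates a scratch directory and removes it automatically, tied to the individual test rather than to the whole suite as MkDir was. A minimal sketch of the replacement idiom (the test name and file contents are illustrative only):

package example

import (
    "os"
    "path/filepath"
    "testing"
)

// TestScratchDir shows the t.TempDir() idiom that replaces c.MkDir():
// the directory is created for this test and cleaned up when it ends.
func TestScratchDir(t *testing.T) {
    dir := t.TempDir() // no manual removeAll needed
    path := filepath.Join(dir, "scratch.txt")
    if err := os.WriteFile(path, []byte("data"), 0o600); err != nil {
        t.Fatalf("writing scratch file: %v", err)
    }
}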
- -var initWD, initWDErr = os.Getwd() - -func init() { - if initWDErr == nil { - initWD = strings.Replace(initWD, "\\", "/", -1) + "/" - } -} - -func nicePath(path string) string { - if initWDErr == nil { - if strings.HasPrefix(path, initWD) { - return path[len(initWD):] - } - } - return path -} - -func niceFuncPath(pc uintptr) string { - function := runtime.FuncForPC(pc) - if function != nil { - filename, line := function.FileLine(pc) - return fmt.Sprintf("%s:%d", nicePath(filename), line) - } - return "" -} - -func niceFuncName(pc uintptr) string { - function := runtime.FuncForPC(pc) - if function != nil { - name := path.Base(function.Name()) - if i := strings.Index(name, "."); i > 0 { - name = name[i+1:] - } - if strings.HasPrefix(name, "(*") { - if i := strings.Index(name, ")"); i > 0 { - name = name[2:i] + name[i+1:] - } - } - if i := strings.LastIndex(name, ".*"); i != -1 { - name = name[:i] + "." + name[i+2:] - } - if i := strings.LastIndex(name, "·"); i != -1 { - name = name[:i] + "." + name[i+2:] - } - return name - } - return "" -} - -// ----------------------------------------------------------------------- -// Result tracker to aggregate call results. - -type Result struct { - Succeeded int - Failed int - Skipped int - Panicked int - FixturePanicked int - ExpectedFailures int - Missed int // Not even tried to run, related to a panic in the fixture. - RunError error // Houston, we've got a problem. - WorkDir string // If KeepWorkDir is true -} - -type resultTracker struct { - result Result - _lastWasProblem bool - _waiting int - _missed int - _expectChan chan *C - _doneChan chan *C - _stopChan chan bool -} - -func newResultTracker() *resultTracker { - return &resultTracker{_expectChan: make(chan *C), // Synchronous - _doneChan: make(chan *C, 32), // Asynchronous - _stopChan: make(chan bool)} // Synchronous -} - -func (tracker *resultTracker) start() { - go tracker._loopRoutine() -} - -func (tracker *resultTracker) waitAndStop() { - <-tracker._stopChan -} - -func (tracker *resultTracker) expectCall(c *C) { - tracker._expectChan <- c -} - -func (tracker *resultTracker) callDone(c *C) { - tracker._doneChan <- c -} - -func (tracker *resultTracker) _loopRoutine() { - for { - var c *C - if tracker._waiting > 0 { - // Calls still running. Can't stop. - select { - // XXX Reindent this (not now to make diff clear) - case <-tracker._expectChan: - tracker._waiting++ - case c = <-tracker._doneChan: - tracker._waiting-- - switch c.status() { - case succeededSt: - if c.kind == testKd { - if c.mustFail { - tracker.result.ExpectedFailures++ - } else { - tracker.result.Succeeded++ - } - } - case failedSt: - tracker.result.Failed++ - case panickedSt: - if c.kind == fixtureKd { - tracker.result.FixturePanicked++ - } else { - tracker.result.Panicked++ - } - case fixturePanickedSt: - // Track it as missed, since the panic - // was on the fixture, not on the test. - tracker.result.Missed++ - case missedSt: - tracker.result.Missed++ - case skippedSt: - if c.kind == testKd { - tracker.result.Skipped++ - } - } - } - } else { - // No calls. Can stop, but no done calls here. - select { - case tracker._stopChan <- true: - return - case <-tracker._expectChan: - tracker._waiting++ - case <-tracker._doneChan: - panic("Tracker got an unexpected done call.") - } - } - } -} - -// ----------------------------------------------------------------------- -// The underlying suite runner. 
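The hunk that follows deletes the suite runner which discovers SetUpSuite, TearDownSuite, SetUpTest and TearDownTest methods by reflection and wraps every Test* method in them. testify expresses the same lifecycle through optional interfaces in its suite package (SetupAllSuite, TearDownAllSuite, SetupTestSuite, TearDownTestSuite), driven by the stock go test runner. A generic, hedged sketch of that shape; exampleSuite and its methods are invented for illustration and are not part of this change:

package example

import (
    "testing"

    "github.com/stretchr/testify/suite"
)

// exampleSuite is an illustrative suite: testify calls the hook methods
// below around every method whose name starts with "Test".
type exampleSuite struct {
    suite.Suite
}

func (s *exampleSuite) SetupSuite()    { s.T().Log("once, before all tests") }
func (s *exampleSuite) TearDownSuite() { s.T().Log("once, after all tests") }
func (s *exampleSuite) SetupTest()     { s.T().Log("before each test") }
func (s *exampleSuite) TearDownTest()  { s.T().Log("after each test") }

func (s *exampleSuite) TestSomething() {
    s.Require().True(true)
}

// The standard entry point the go test driver sees.
func TestExampleSuite(t *testing.T) {
    suite.Run(t, new(exampleSuite))
}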
- -type suiteRunner struct { - suite interface{} - setUpSuite, tearDownSuite *methodType - setUpTest, tearDownTest *methodType - tests []*methodType - tracker *resultTracker - tempDir *tempDir - keepDir bool - output *outputWriter - reportedProblemLast bool - benchTime time.Duration - benchMem bool -} - -type RunConf struct { - Output io.Writer - Stream bool - Verbose bool - Filter string - Benchmark bool - BenchmarkTime time.Duration // Defaults to 1 second - BenchmarkMem bool - KeepWorkDir bool -} - -// Create a new suiteRunner able to run all methods in the given suite. -func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner { - var conf RunConf - if runConf != nil { - conf = *runConf - } - if conf.Output == nil { - conf.Output = os.Stdout - } - if conf.Benchmark { - conf.Verbose = true - } - - suiteType := reflect.TypeOf(suite) - suiteNumMethods := suiteType.NumMethod() - suiteValue := reflect.ValueOf(suite) - - runner := &suiteRunner{ - suite: suite, - output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose), - tracker: newResultTracker(), - benchTime: conf.BenchmarkTime, - benchMem: conf.BenchmarkMem, - tempDir: &tempDir{}, - keepDir: conf.KeepWorkDir, - tests: make([]*methodType, 0, suiteNumMethods), - } - if runner.benchTime == 0 { - runner.benchTime = 1 * time.Second - } - - var filterRegexp *regexp.Regexp - if conf.Filter != "" { - regexp, err := regexp.Compile(conf.Filter) - if err != nil { - msg := "Bad filter expression: " + err.Error() - runner.tracker.result.RunError = errors.New(msg) - return runner - } - filterRegexp = regexp - } - - for i := 0; i != suiteNumMethods; i++ { - method := newMethod(suiteValue, i) - switch method.Info.Name { - case "SetUpSuite": - runner.setUpSuite = method - case "TearDownSuite": - runner.tearDownSuite = method - case "SetUpTest": - runner.setUpTest = method - case "TearDownTest": - runner.tearDownTest = method - default: - prefix := "Test" - if conf.Benchmark { - prefix = "Benchmark" - } - if !strings.HasPrefix(method.Info.Name, prefix) { - continue - } - if filterRegexp == nil || method.matches(filterRegexp) { - runner.tests = append(runner.tests, method) - } - } - } - return runner -} - -// Run all methods in the given suite. -func (runner *suiteRunner) run() *Result { - if runner.tracker.result.RunError == nil && len(runner.tests) > 0 { - runner.tracker.start() - if runner.checkFixtureArgs() { - c := runner.runFixture(runner.setUpSuite, "", nil) - if c == nil || c.status() == succeededSt { - for i := 0; i != len(runner.tests); i++ { - c := runner.runTest(runner.tests[i]) - if c.status() == fixturePanickedSt { - runner.skipTests(missedSt, runner.tests[i+1:]) - break - } - } - } else if c != nil && c.status() == skippedSt { - runner.skipTests(skippedSt, runner.tests) - } else { - runner.skipTests(missedSt, runner.tests) - } - runner.runFixture(runner.tearDownSuite, "", nil) - } else { - runner.skipTests(missedSt, runner.tests) - } - runner.tracker.waitAndStop() - if runner.keepDir { - runner.tracker.result.WorkDir = runner.tempDir.path - } else { - runner.tempDir.removeAll() - } - } - return &runner.tracker.result -} - -// Create a call object with the given suite method, and fork a -// goroutine with the provided dispatcher for running it. 
-func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { - var logw io.Writer - if runner.output.Stream { - logw = runner.output - } - if logb == nil { - logb = new(logger) - } - c := &C{ - method: method, - kind: kind, - testName: testName, - logb: logb, - logw: logw, - tempDir: runner.tempDir, - done: make(chan *C, 1), - timer: timer{benchTime: runner.benchTime}, - startTime: time.Now(), - benchMem: runner.benchMem, - } - runner.tracker.expectCall(c) - go (func() { - runner.reportCallStarted(c) - defer runner.callDone(c) - dispatcher(c) - })() - return c -} - -// Same as forkCall(), but wait for call to finish before returning. -func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { - c := runner.forkCall(method, kind, testName, logb, dispatcher) - <-c.done - return c -} - -// Handle a finished call. If there were any panics, update the call status -// accordingly. Then, mark the call as done and report to the tracker. -func (runner *suiteRunner) callDone(c *C) { - value := recover() - if value != nil { - switch v := value.(type) { - case *fixturePanic: - if v.status == skippedSt { - c.setStatus(skippedSt) - } else { - c.logSoftPanic("Fixture has panicked (see related PANIC)") - c.setStatus(fixturePanickedSt) - } - default: - c.logPanic(1, value) - c.setStatus(panickedSt) - } - } - if c.mustFail { - switch c.status() { - case failedSt: - c.setStatus(succeededSt) - case succeededSt: - c.setStatus(failedSt) - c.logString("Error: Test succeeded, but was expected to fail") - c.logString("Reason: " + c.reason) - } - } - - runner.reportCallDone(c) - c.done <- c -} - -// Runs a fixture call synchronously. The fixture will still be run in a -// goroutine like all suite methods, but this method will not return -// while the fixture goroutine is not done, because the fixture must be -// run in a desired order. -func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C { - if method != nil { - c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) { - c.ResetTimer() - c.StartTimer() - defer c.StopTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - }) - return c - } - return nil -} - -// Run the fixture method with runFixture(), but panic with a fixturePanic{} -// in case the fixture method panics. This makes it easier to track the -// fixture panic together with other call panics within forkTest(). -func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C { - if skipped != nil && *skipped { - return nil - } - c := runner.runFixture(method, testName, logb) - if c != nil && c.status() != succeededSt { - if skipped != nil { - *skipped = c.status() == skippedSt - } - panic(&fixturePanic{c.status(), method}) - } - return c -} - -type fixturePanic struct { - status funcStatus - method *methodType -} - -// Run the suite test method, together with the test-specific fixture, -// asynchronously. 
-func (runner *suiteRunner) forkTest(method *methodType) *C { - testName := method.String() - return runner.forkCall(method, testKd, testName, nil, func(c *C) { - var skipped bool - defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped) - defer c.StopTimer() - benchN := 1 - for { - runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped) - mt := c.method.Type() - if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) { - // Rather than a plain panic, provide a more helpful message when - // the argument type is incorrect. - c.setStatus(panickedSt) - c.logArgPanic(c.method, "*check.C") - return - } - if strings.HasPrefix(c.method.Info.Name, "Test") { - c.ResetTimer() - c.StartTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - return - } - if !strings.HasPrefix(c.method.Info.Name, "Benchmark") { - panic("unexpected method prefix: " + c.method.Info.Name) - } - - runtime.GC() - c.N = benchN - c.ResetTimer() - c.StartTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - c.StopTimer() - if c.status() != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 { - return - } - perOpN := int(1e9) - if c.nsPerOp() != 0 { - perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp()) - } - - // Logic taken from the stock testing package: - // - Run more iterations than we think we'll need for a second (1.5x). - // - Don't grow too fast in case we had timing errors previously. - // - Be sure to run at least one more than last time. - benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1) - benchN = roundUp(benchN) - - skipped = true // Don't run the deferred one if this panics. - runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil) - skipped = false - } - }) -} - -// Same as forkTest(), but wait for the test to finish before returning. -func (runner *suiteRunner) runTest(method *methodType) *C { - c := runner.forkTest(method) - <-c.done - return c -} - -// Helper to mark tests as skipped or missed. A bit heavy for what -// it does, but it enables homogeneous handling of tracking, including -// nice verbose output. -func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) { - for _, method := range methods { - runner.runFunc(method, testKd, "", nil, func(c *C) { - c.setStatus(status) - }) - } -} - -// Verify if the fixture arguments are *check.C. In case of errors, -// log the error as a panic in the fixture method call, and return false. 
-func (runner *suiteRunner) checkFixtureArgs() bool { - succeeded := true - argType := reflect.TypeOf(&C{}) - for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} { - if method != nil { - mt := method.Type() - if mt.NumIn() != 1 || mt.In(0) != argType { - succeeded = false - runner.runFunc(method, fixtureKd, "", nil, func(c *C) { - c.logArgPanic(method, "*check.C") - c.setStatus(panickedSt) - }) - } - } - } - return succeeded -} - -func (runner *suiteRunner) reportCallStarted(c *C) { - runner.output.WriteCallStarted("START", c) -} - -func (runner *suiteRunner) reportCallDone(c *C) { - runner.tracker.callDone(c) - switch c.status() { - case succeededSt: - if c.mustFail { - runner.output.WriteCallSuccess("FAIL EXPECTED", c) - } else { - runner.output.WriteCallSuccess("PASS", c) - } - case skippedSt: - runner.output.WriteCallSuccess("SKIP", c) - case failedSt: - runner.output.WriteCallProblem("FAIL", c) - case panickedSt: - runner.output.WriteCallProblem("PANIC", c) - case fixturePanickedSt: - // That's a testKd call reporting that its fixture - // has panicked. The fixture call which caused the - // panic itself was tracked above. We'll report to - // aid debugging. - runner.output.WriteCallProblem("PANIC", c) - case missedSt: - runner.output.WriteCallSuccess("MISS", c) - } -} diff --git a/vendor/gopkg.in/check.v1/checkers.go b/vendor/gopkg.in/check.v1/checkers.go deleted file mode 100644 index 032619792e..0000000000 --- a/vendor/gopkg.in/check.v1/checkers.go +++ /dev/null @@ -1,528 +0,0 @@ -package check - -import ( - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/kr/pretty" -) - -// ----------------------------------------------------------------------- -// CommentInterface and Commentf helper, to attach extra information to checks. - -type comment struct { - format string - args []interface{} -} - -// Commentf returns an infomational value to use with Assert or Check calls. -// If the checker test fails, the provided arguments will be passed to -// fmt.Sprintf, and will be presented next to the logged failure. -// -// For example: -// -// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i)) -// -// Note that if the comment is constant, a better option is to -// simply use a normal comment right above or next to the line, as -// it will also get printed with any errors: -// -// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123) -// -func Commentf(format string, args ...interface{}) CommentInterface { - return &comment{format, args} -} - -// CommentInterface must be implemented by types that attach extra -// information to failed checks. See the Commentf function for details. -type CommentInterface interface { - CheckCommentString() string -} - -func (c *comment) CheckCommentString() string { - return fmt.Sprintf(c.format, c.args...) -} - -// ----------------------------------------------------------------------- -// The Checker interface. - -// The Checker interface must be provided by checkers used with -// the Assert and Check verification methods. -type Checker interface { - Info() *CheckerInfo - Check(params []interface{}, names []string) (result bool, error string) -} - -// See the Checker interface. -type CheckerInfo struct { - Name string - Params []string -} - -func (info *CheckerInfo) Info() *CheckerInfo { - return info -} - -// ----------------------------------------------------------------------- -// Not checker logic inverter. 
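checkers.go, deleted here, provides Commentf for attaching extra context to a failing assertion and the Checker interface for pluggable checks. testify has no checker registry: its assertion helpers accept a trailing msgAndArgs variadic for the same contextual message, and a custom Checker becomes an ordinary helper function that takes *testing.T. A small sketch of the equivalent call shapes (the values and the assertEven helper are made up for illustration):

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestCommentfEquivalent(t *testing.T) {
    got, want := 42, 42

    // c.Assert(got, check.Equals, want, check.Commentf("iteration #%d failed", 3))
    // becomes a trailing format string plus arguments:
    require.Equal(t, want, got, "iteration #%d failed", 3)

    // A custom Checker becomes a plain helper that calls t.Helper().
    assertEven := func(t *testing.T, n int) {
        t.Helper()
        assert.Zero(t, n%2, "%d is not even", n)
    }
    assertEven(t, got)
}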
- -// The Not checker inverts the logic of the provided checker. The -// resulting checker will succeed where the original one failed, and -// vice-versa. -// -// For example: -// -// c.Assert(a, Not(Equals), b) -// -func Not(checker Checker) Checker { - return ¬Checker{checker} -} - -type notChecker struct { - sub Checker -} - -func (checker *notChecker) Info() *CheckerInfo { - info := *checker.sub.Info() - info.Name = "Not(" + info.Name + ")" - return &info -} - -func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) { - result, error = checker.sub.Check(params, names) - result = !result - if result { - // clear error message if the new result is true - error = "" - } - return -} - -// ----------------------------------------------------------------------- -// IsNil checker. - -type isNilChecker struct { - *CheckerInfo -} - -// The IsNil checker tests whether the obtained value is nil. -// -// For example: -// -// c.Assert(err, IsNil) -// -var IsNil Checker = &isNilChecker{ - &CheckerInfo{Name: "IsNil", Params: []string{"value"}}, -} - -func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) { - return isNil(params[0]), "" -} - -func isNil(obtained interface{}) (result bool) { - if obtained == nil { - result = true - } else { - switch v := reflect.ValueOf(obtained); v.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - } - return -} - -// ----------------------------------------------------------------------- -// NotNil checker. Alias for Not(IsNil), since it's so common. - -type notNilChecker struct { - *CheckerInfo -} - -// The NotNil checker verifies that the obtained value is not nil. -// -// For example: -// -// c.Assert(iface, NotNil) -// -// This is an alias for Not(IsNil), made available since it's a -// fairly common check. -// -var NotNil Checker = ¬NilChecker{ - &CheckerInfo{Name: "NotNil", Params: []string{"value"}}, -} - -func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) { - return !isNil(params[0]), "" -} - -// ----------------------------------------------------------------------- -// Equals checker. - -func diffworthy(a interface{}) bool { - if a == nil { - return false - } - - t := reflect.TypeOf(a) - switch t.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct, reflect.String, reflect.Ptr: - return true - } - return false -} - -// formatUnequal will dump the actual and expected values into a textual -// representation and return an error message containing a diff. -func formatUnequal(obtained interface{}, expected interface{}) string { - // We do not do diffs for basic types because go-check already - // shows them very cleanly. - if !diffworthy(obtained) || !diffworthy(expected) { - return "" - } - - // Handle strings, short strings are ignored (go-check formats - // them very nicely already). We do multi-line strings by - // generating two string slices and using kr.Diff to compare - // those (kr.Diff does not do string diffs by itself). 
- aStr, aOK := obtained.(string) - bStr, bOK := expected.(string) - if aOK && bOK { - l1 := strings.Split(aStr, "\n") - l2 := strings.Split(bStr, "\n") - // the "2" here is a bit arbitrary - if len(l1) > 2 && len(l2) > 2 { - diff := pretty.Diff(l1, l2) - return fmt.Sprintf(`String difference: -%s`, formatMultiLine(strings.Join(diff, "\n"), false)) - } - // string too short - return "" - } - - // generic diff - diff := pretty.Diff(obtained, expected) - if len(diff) == 0 { - // No diff, this happens when e.g. just struct - // pointers are different but the structs have - // identical values. - return "" - } - - return fmt.Sprintf(`Difference: -%s`, formatMultiLine(strings.Join(diff, "\n"), false)) -} - -type equalsChecker struct { - *CheckerInfo -} - -// The Equals checker verifies that the obtained value is equal to -// the expected value, according to usual Go semantics for ==. -// -// For example: -// -// c.Assert(value, Equals, 42) -// -var Equals Checker = &equalsChecker{ - &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}}, -} - -func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) { - defer func() { - if v := recover(); v != nil { - result = false - error = fmt.Sprint(v) - } - }() - - result = params[0] == params[1] - if !result { - error = formatUnequal(params[0], params[1]) - } - return -} - -// ----------------------------------------------------------------------- -// DeepEquals checker. - -type deepEqualsChecker struct { - *CheckerInfo -} - -// The DeepEquals checker verifies that the obtained value is deep-equal to -// the expected value. The check will work correctly even when facing -// slices, interfaces, and values of different types (which always fail -// the test). -// -// For example: -// -// c.Assert(value, DeepEquals, 42) -// c.Assert(array, DeepEquals, []string{"hi", "there"}) -// -var DeepEquals Checker = &deepEqualsChecker{ - &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}}, -} - -func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) { - result = reflect.DeepEqual(params[0], params[1]) - if !result { - error = formatUnequal(params[0], params[1]) - } - return -} - -// ----------------------------------------------------------------------- -// HasLen checker. - -type hasLenChecker struct { - *CheckerInfo -} - -// The HasLen checker verifies that the obtained value has the -// provided length. In many cases this is superior to using Equals -// in conjunction with the len function because in case the check -// fails the value itself will be printed, instead of its length, -// providing more details for figuring the problem. -// -// For example: -// -// c.Assert(list, HasLen, 5) -// -var HasLen Checker = &hasLenChecker{ - &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}}, -} - -func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) { - n, ok := params[1].(int) - if !ok { - return false, "n must be an int" - } - value := reflect.ValueOf(params[0]) - switch value.Kind() { - case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String: - default: - return false, "obtained value type has no length" - } - return value.Len() == n, "" -} - -// ----------------------------------------------------------------------- -// ErrorMatches checker. 
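The comparison checkers removed in this file map onto testify helpers fairly directly, with one caveat worth keeping in mind: gocheck's Equals uses Go == semantics, while testify's assert.Equal compares values with deep equality (assert.Same is the pointer-identity check). DeepEquals corresponds to assert.Equal, HasLen to assert.Len, IsNil/NotNil to assert.NoError/assert.Error for errors (or assert.Nil/assert.NotNil in general), and ErrorMatches to a regexp assertion. An illustrative sketch, not taken from the skopeo tests:

package example

import (
    "errors"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestCheckerEquivalents(t *testing.T) {
    list := []string{"hi", "there"}
    err := errors.New("permission denied: /etc/shadow")

    // c.Assert(value, check.DeepEquals, []string{"hi", "there"})
    assert.Equal(t, []string{"hi", "there"}, list)

    // c.Assert(list, check.HasLen, 2)
    assert.Len(t, list, 2)

    // c.Assert(err, check.ErrorMatches, "permission.*denied.*")
    // assert.Regexp is unanchored, unlike ErrorMatches, so anchor explicitly if that matters.
    assert.Regexp(t, "^permission.*denied.*$", err.Error())

    // c.Assert(err, check.NotNil) / check.IsNil
    assert.Error(t, err)
    assert.Nil(t, nil)
}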
- -type errorMatchesChecker struct { - *CheckerInfo -} - -// The ErrorMatches checker verifies that the error value -// is non nil and matches the regular expression provided. -// -// For example: -// -// c.Assert(err, ErrorMatches, "perm.*denied") -// -var ErrorMatches Checker = errorMatchesChecker{ - &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}}, -} - -func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) { - if params[0] == nil { - return false, "Error value is nil" - } - err, ok := params[0].(error) - if !ok { - return false, "Value is not an error" - } - params[0] = err.Error() - names[0] = "error" - return matches(params[0], params[1]) -} - -// ----------------------------------------------------------------------- -// Matches checker. - -type matchesChecker struct { - *CheckerInfo -} - -// The Matches checker verifies that the string provided as the obtained -// value (or the string resulting from obtained.String()) matches the -// regular expression provided. -// -// For example: -// -// c.Assert(err, Matches, "perm.*denied") -// -var Matches Checker = &matchesChecker{ - &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}}, -} - -func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) { - return matches(params[0], params[1]) -} - -func matches(value, regex interface{}) (result bool, error string) { - reStr, ok := regex.(string) - if !ok { - return false, "Regex must be a string" - } - valueStr, valueIsStr := value.(string) - if !valueIsStr { - if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr { - valueStr, valueIsStr = valueWithStr.String(), true - } - } - if valueIsStr { - matches, err := regexp.MatchString("^"+reStr+"$", valueStr) - if err != nil { - return false, "Can't compile regex: " + err.Error() - } - return matches, "" - } - return false, "Obtained value is not a string and has no .String()" -} - -// ----------------------------------------------------------------------- -// Panics checker. - -type panicsChecker struct { - *CheckerInfo -} - -// The Panics checker verifies that calling the provided zero-argument -// function will cause a panic which is deep-equal to the provided value. -// -// For example: -// -// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}). -// -// -var Panics Checker = &panicsChecker{ - &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}}, -} - -func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) { - f := reflect.ValueOf(params[0]) - if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { - return false, "Function must take zero arguments" - } - defer func() { - // If the function has not panicked, then don't do the check. - if error != "" { - return - } - params[0] = recover() - names[0] = "panic" - result = reflect.DeepEqual(params[0], params[1]) - }() - f.Call(nil) - return false, "Function has not panicked" -} - -type panicMatchesChecker struct { - *CheckerInfo -} - -// The PanicMatches checker verifies that calling the provided zero-argument -// function will cause a panic with an error value matching -// the regular expression provided. -// -// For example: -// -// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`). 
-// -// -var PanicMatches Checker = &panicMatchesChecker{ - &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}}, -} - -func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) { - f := reflect.ValueOf(params[0]) - if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { - return false, "Function must take zero arguments" - } - defer func() { - // If the function has not panicked, then don't do the check. - if errmsg != "" { - return - } - obtained := recover() - names[0] = "panic" - if e, ok := obtained.(error); ok { - params[0] = e.Error() - } else if _, ok := obtained.(string); ok { - params[0] = obtained - } else { - errmsg = "Panic value is not a string or an error" - return - } - result, errmsg = matches(params[0], params[1]) - }() - f.Call(nil) - return false, "Function has not panicked" -} - -// ----------------------------------------------------------------------- -// FitsTypeOf checker. - -type fitsTypeChecker struct { - *CheckerInfo -} - -// The FitsTypeOf checker verifies that the obtained value is -// assignable to a variable with the same type as the provided -// sample value. -// -// For example: -// -// c.Assert(value, FitsTypeOf, int64(0)) -// c.Assert(value, FitsTypeOf, os.Error(nil)) -// -var FitsTypeOf Checker = &fitsTypeChecker{ - &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}}, -} - -func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) { - obtained := reflect.ValueOf(params[0]) - sample := reflect.ValueOf(params[1]) - if !obtained.IsValid() { - return false, "" - } - if !sample.IsValid() { - return false, "Invalid sample value" - } - return obtained.Type().AssignableTo(sample.Type()), "" -} - -// ----------------------------------------------------------------------- -// Implements checker. - -type implementsChecker struct { - *CheckerInfo -} - -// The Implements checker verifies that the obtained value -// implements the interface specified via a pointer to an interface -// variable. -// -// For example: -// -// var e os.Error -// c.Assert(err, Implements, &e) -// -var Implements Checker = &implementsChecker{ - &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}}, -} - -func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) { - obtained := reflect.ValueOf(params[0]) - ifaceptr := reflect.ValueOf(params[1]) - if !obtained.IsValid() { - return false, "" - } - if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface { - return false, "ifaceptr should be a pointer to an interface variable" - } - return obtained.Type().Implements(ifaceptr.Elem().Type()), "" -} diff --git a/vendor/gopkg.in/check.v1/helpers.go b/vendor/gopkg.in/check.v1/helpers.go deleted file mode 100644 index 9e0d0bac4c..0000000000 --- a/vendor/gopkg.in/check.v1/helpers.go +++ /dev/null @@ -1,233 +0,0 @@ -package check - -import ( - "fmt" - "strings" - "time" -) - -// TestName returns the current test name in the form "SuiteName.TestName" -func (c *C) TestName() string { - return c.testName -} - -// ----------------------------------------------------------------------- -// Basic succeeding/failing logic. - -// Failed returns whether the currently running test has already failed. -func (c *C) Failed() bool { - return c.status() == failedSt -} - -// Fail marks the currently running test as failed. 
-// -// Something ought to have been previously logged so the developer can tell -// what went wrong. The higher level helper functions will fail the test -// and do the logging properly. -func (c *C) Fail() { - c.setStatus(failedSt) -} - -// FailNow marks the currently running test as failed and stops running it. -// Something ought to have been previously logged so the developer can tell -// what went wrong. The higher level helper functions will fail the test -// and do the logging properly. -func (c *C) FailNow() { - c.Fail() - c.stopNow() -} - -// Succeed marks the currently running test as succeeded, undoing any -// previous failures. -func (c *C) Succeed() { - c.setStatus(succeededSt) -} - -// SucceedNow marks the currently running test as succeeded, undoing any -// previous failures, and stops running the test. -func (c *C) SucceedNow() { - c.Succeed() - c.stopNow() -} - -// ExpectFailure informs that the running test is knowingly broken for -// the provided reason. If the test does not fail, an error will be reported -// to raise attention to this fact. This method is useful to temporarily -// disable tests which cover well known problems until a better time to -// fix the problem is found, without forgetting about the fact that a -// failure still exists. -func (c *C) ExpectFailure(reason string) { - if reason == "" { - panic("Missing reason why the test is expected to fail") - } - c.mustFail = true - c.reason = reason -} - -// Skip skips the running test for the provided reason. If run from within -// SetUpTest, the individual test being set up will be skipped, and if run -// from within SetUpSuite, the whole suite is skipped. -func (c *C) Skip(reason string) { - if reason == "" { - panic("Missing reason why the test is being skipped") - } - c.reason = reason - c.setStatus(skippedSt) - c.stopNow() -} - -// ----------------------------------------------------------------------- -// Basic logging. - -// GetTestLog returns the current test error output. -func (c *C) GetTestLog() string { - return c.logb.String() -} - -// Log logs some information into the test error output. -// The provided arguments are assembled together into a string with fmt.Sprint. -func (c *C) Log(args ...interface{}) { - c.log(args...) -} - -// Log logs some information into the test error output. -// The provided arguments are assembled together into a string with fmt.Sprintf. -func (c *C) Logf(format string, args ...interface{}) { - c.logf(format, args...) -} - -// Output enables *C to be used as a logger in functions that require only -// the minimum interface of *log.Logger. -func (c *C) Output(calldepth int, s string) error { - d := time.Now().Sub(c.startTime) - msec := d / time.Millisecond - sec := d / time.Second - min := d / time.Minute - - c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s) - return nil -} - -// Error logs an error into the test error output and marks the test as failed. -// The provided arguments are assembled together into a string with fmt.Sprint. -func (c *C) Error(args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) - c.logNewLine() - c.Fail() -} - -// Errorf logs an error into the test error output and marks the test as failed. -// The provided arguments are assembled together into a string with fmt.Sprintf. 
-func (c *C) Errorf(format string, args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprintf("Error: "+format, args...)) - c.logNewLine() - c.Fail() -} - -// Fatal logs an error into the test error output, marks the test as failed, and -// stops the test execution. The provided arguments are assembled together into -// a string with fmt.Sprint. -func (c *C) Fatal(args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) - c.logNewLine() - c.FailNow() -} - -// Fatlaf logs an error into the test error output, marks the test as failed, and -// stops the test execution. The provided arguments are assembled together into -// a string with fmt.Sprintf. -func (c *C) Fatalf(format string, args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...))) - c.logNewLine() - c.FailNow() -} - -// ----------------------------------------------------------------------- -// Generic checks and assertions based on checkers. - -// Check verifies if the first value matches the expected value according -// to the provided checker. If they do not match, an error is logged, the -// test is marked as failed, and the test execution continues. -// -// Some checkers may not need the expected argument (e.g. IsNil). -// -// If the last value in args implements CommentInterface, it is used to log -// additional information instead of being passed to the checker (see Commentf -// for an example). -func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool { - return c.internalCheck("Check", obtained, checker, args...) -} - -// Assert ensures that the first value matches the expected value according -// to the provided checker. If they do not match, an error is logged, the -// test is marked as failed, and the test execution stops. -// -// Some checkers may not need the expected argument (e.g. IsNil). -// -// If the last value in args implements CommentInterface, it is used to log -// additional information instead of being passed to the checker (see Commentf -// for an example). -func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) { - if !c.internalCheck("Assert", obtained, checker, args...) { - c.stopNow() - } -} - -func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool { - if checker == nil { - c.logCaller(2) - c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName)) - c.logString("Oops.. you've provided a nil checker!") - c.logNewLine() - c.Fail() - return false - } - - // If the last argument is a bug info, extract it out. - var comment CommentInterface - if len(args) > 0 { - if c, ok := args[len(args)-1].(CommentInterface); ok { - comment = c - args = args[:len(args)-1] - } - } - - params := append([]interface{}{obtained}, args...) - info := checker.Info() - - if len(params) != len(info.Params) { - names := append([]string{info.Params[0], info.Name}, info.Params[1:]...) - c.logCaller(2) - c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", "))) - c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1)) - c.logNewLine() - c.Fail() - return false - } - - // Copy since it may be mutated by Check. - names := append([]string{}, info.Params...) - - // Do the actual check. 
- result, error := checker.Check(params, names) - if !result || error != "" { - c.logCaller(2) - for i := 0; i != len(params); i++ { - c.logValue(names[i], params[i]) - } - if comment != nil { - c.logString(comment.CheckCommentString()) - } - if error != "" { - c.logString(error) - } - c.logNewLine() - c.Fail() - return false - } - return true -} diff --git a/vendor/gopkg.in/check.v1/printer.go b/vendor/gopkg.in/check.v1/printer.go deleted file mode 100644 index e0f7557b5c..0000000000 --- a/vendor/gopkg.in/check.v1/printer.go +++ /dev/null @@ -1,168 +0,0 @@ -package check - -import ( - "bytes" - "go/ast" - "go/parser" - "go/printer" - "go/token" - "os" -) - -func indent(s, with string) (r string) { - eol := true - for i := 0; i != len(s); i++ { - c := s[i] - switch { - case eol && c == '\n' || c == '\r': - case c == '\n' || c == '\r': - eol = true - case eol: - eol = false - s = s[:i] + with + s[i:] - i += len(with) - } - } - return s -} - -func printLine(filename string, line int) (string, error) { - fset := token.NewFileSet() - file, err := os.Open(filename) - if err != nil { - return "", err - } - fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments) - if err != nil { - return "", err - } - config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4} - lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config} - ast.Walk(lp, fnode) - result := lp.output.Bytes() - // Comments leave \n at the end. - n := len(result) - for n > 0 && result[n-1] == '\n' { - n-- - } - return string(result[:n]), nil -} - -type linePrinter struct { - config *printer.Config - fset *token.FileSet - fnode *ast.File - line int - output bytes.Buffer - stmt ast.Stmt -} - -func (lp *linePrinter) emit() bool { - if lp.stmt != nil { - lp.trim(lp.stmt) - lp.printWithComments(lp.stmt) - lp.stmt = nil - return true - } - return false -} - -func (lp *linePrinter) printWithComments(n ast.Node) { - nfirst := lp.fset.Position(n.Pos()).Line - nlast := lp.fset.Position(n.End()).Line - for _, g := range lp.fnode.Comments { - cfirst := lp.fset.Position(g.Pos()).Line - clast := lp.fset.Position(g.End()).Line - if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column { - for _, c := range g.List { - lp.output.WriteString(c.Text) - lp.output.WriteByte('\n') - } - } - if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash { - // The printer will not include the comment if it starts past - // the node itself. Trick it into printing by overlapping the - // slash with the end of the statement. - g.List[0].Slash = n.End() - 1 - } - } - node := &printer.CommentedNode{n, lp.fnode.Comments} - lp.config.Fprint(&lp.output, lp.fset, node) -} - -func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) { - if n == nil { - if lp.output.Len() == 0 { - lp.emit() - } - return nil - } - first := lp.fset.Position(n.Pos()).Line - last := lp.fset.Position(n.End()).Line - if first <= lp.line && last >= lp.line { - // Print the innermost statement containing the line. 
- if stmt, ok := n.(ast.Stmt); ok { - if _, ok := n.(*ast.BlockStmt); !ok { - lp.stmt = stmt - } - } - if first == lp.line && lp.emit() { - return nil - } - return lp - } - return nil -} - -func (lp *linePrinter) trim(n ast.Node) bool { - stmt, ok := n.(ast.Stmt) - if !ok { - return true - } - line := lp.fset.Position(n.Pos()).Line - if line != lp.line { - return false - } - switch stmt := stmt.(type) { - case *ast.IfStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.SwitchStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.TypeSwitchStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.CaseClause: - stmt.Body = lp.trimList(stmt.Body) - case *ast.CommClause: - stmt.Body = lp.trimList(stmt.Body) - case *ast.BlockStmt: - stmt.List = lp.trimList(stmt.List) - } - return true -} - -func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt { - if !lp.trim(stmt) { - return lp.emptyBlock(stmt) - } - stmt.Rbrace = stmt.Lbrace - return stmt -} - -func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt { - for i := 0; i != len(stmts); i++ { - if !lp.trim(stmts[i]) { - stmts[i] = lp.emptyStmt(stmts[i]) - break - } - } - return stmts -} - -func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt { - return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}} -} - -func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt { - p := n.Pos() - return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p} -} diff --git a/vendor/gopkg.in/check.v1/reporter.go b/vendor/gopkg.in/check.v1/reporter.go deleted file mode 100644 index fb04f76f64..0000000000 --- a/vendor/gopkg.in/check.v1/reporter.go +++ /dev/null @@ -1,88 +0,0 @@ -package check - -import ( - "fmt" - "io" - "sync" -) - -// ----------------------------------------------------------------------- -// Output writer manages atomic output writing according to settings. - -type outputWriter struct { - m sync.Mutex - writer io.Writer - wroteCallProblemLast bool - Stream bool - Verbose bool -} - -func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter { - return &outputWriter{writer: writer, Stream: stream, Verbose: verbose} -} - -func (ow *outputWriter) Write(content []byte) (n int, err error) { - ow.m.Lock() - n, err = ow.writer.Write(content) - ow.m.Unlock() - return -} - -func (ow *outputWriter) WriteCallStarted(label string, c *C) { - if ow.Stream { - header := renderCallHeader(label, c, "", "\n") - ow.m.Lock() - ow.writer.Write([]byte(header)) - ow.m.Unlock() - } -} - -func (ow *outputWriter) WriteCallProblem(label string, c *C) { - var prefix string - if !ow.Stream { - prefix = "\n-----------------------------------" + - "-----------------------------------\n" - } - header := renderCallHeader(label, c, prefix, "\n\n") - ow.m.Lock() - ow.wroteCallProblemLast = true - ow.writer.Write([]byte(header)) - if !ow.Stream { - c.logb.WriteTo(ow.writer) - } - ow.m.Unlock() -} - -func (ow *outputWriter) WriteCallSuccess(label string, c *C) { - if ow.Stream || (ow.Verbose && c.kind == testKd) { - // TODO Use a buffer here. - var suffix string - if c.reason != "" { - suffix = " (" + c.reason + ")" - } - if c.status() == succeededSt { - suffix += "\t" + c.timerString() - } - suffix += "\n" - if ow.Stream { - suffix += "\n" - } - header := renderCallHeader(label, c, "", suffix) - ow.m.Lock() - // Resist temptation of using line as prefix above due to race. 
- if !ow.Stream && ow.wroteCallProblemLast { - header = "\n-----------------------------------" + - "-----------------------------------\n" + - header - } - ow.wroteCallProblemLast = false - ow.writer.Write([]byte(header)) - ow.m.Unlock() - } -} - -func renderCallHeader(label string, c *C, prefix, suffix string) string { - pc := c.method.PC() - return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc), - niceFuncName(pc), suffix) -} diff --git a/vendor/gopkg.in/check.v1/run.go b/vendor/gopkg.in/check.v1/run.go deleted file mode 100644 index da8fd79872..0000000000 --- a/vendor/gopkg.in/check.v1/run.go +++ /dev/null @@ -1,175 +0,0 @@ -package check - -import ( - "bufio" - "flag" - "fmt" - "os" - "testing" - "time" -) - -// ----------------------------------------------------------------------- -// Test suite registry. - -var allSuites []interface{} - -// Suite registers the given value as a test suite to be run. Any methods -// starting with the Test prefix in the given value will be considered as -// a test method. -func Suite(suite interface{}) interface{} { - allSuites = append(allSuites, suite) - return suite -} - -// ----------------------------------------------------------------------- -// Public running interface. - -var ( - oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run") - oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode") - oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)") - oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks") - oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark") - oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run") - oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory") - - newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run") - newVerboseFlag = flag.Bool("check.v", false, "Verbose mode") - newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)") - newBenchFlag = flag.Bool("check.b", false, "Run benchmarks") - newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark") - newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks") - newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run") - newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory") -) - -// TestingT runs all test suites registered with the Suite function, -// printing results to stdout, and reporting any failures back to -// the "testing" package. 
-func TestingT(testingT *testing.T) { - benchTime := *newBenchTime - if benchTime == 1*time.Second { - benchTime = *oldBenchTime - } - conf := &RunConf{ - Filter: *oldFilterFlag + *newFilterFlag, - Verbose: *oldVerboseFlag || *newVerboseFlag, - Stream: *oldStreamFlag || *newStreamFlag, - Benchmark: *oldBenchFlag || *newBenchFlag, - BenchmarkTime: benchTime, - BenchmarkMem: *newBenchMem, - KeepWorkDir: *oldWorkFlag || *newWorkFlag, - } - if *oldListFlag || *newListFlag { - w := bufio.NewWriter(os.Stdout) - for _, name := range ListAll(conf) { - fmt.Fprintln(w, name) - } - w.Flush() - return - } - result := RunAll(conf) - println(result.String()) - if !result.Passed() { - testingT.Fail() - } -} - -// RunAll runs all test suites registered with the Suite function, using the -// provided run configuration. -func RunAll(runConf *RunConf) *Result { - result := Result{} - for _, suite := range allSuites { - result.Add(Run(suite, runConf)) - } - return &result -} - -// Run runs the provided test suite using the provided run configuration. -func Run(suite interface{}, runConf *RunConf) *Result { - runner := newSuiteRunner(suite, runConf) - return runner.run() -} - -// ListAll returns the names of all the test functions registered with the -// Suite function that will be run with the provided run configuration. -func ListAll(runConf *RunConf) []string { - var names []string - for _, suite := range allSuites { - names = append(names, List(suite, runConf)...) - } - return names -} - -// List returns the names of the test functions in the given -// suite that will be run with the provided run configuration. -func List(suite interface{}, runConf *RunConf) []string { - var names []string - runner := newSuiteRunner(suite, runConf) - for _, t := range runner.tests { - names = append(names, t.String()) - } - return names -} - -// ----------------------------------------------------------------------- -// Result methods. 
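With the check.v1 registry and its -check.b, -check.btime and -check.bmem flags removed above, benchmarks run through the standard testing.B machinery, which already provides the StartTimer/StopTimer/ResetTimer/SetBytes controls and the iteration-growth loop that the deleted timer and forkTest code reimplemented; -bench, -benchtime and -benchmem are handled by go test itself. A minimal sketch of a plain benchmark (the copy loop is illustrative only, not an existing skopeo benchmark):

package example

import "testing"

// BenchmarkCopyBuffer is an illustrative stand-in for a Benchmark* method
// that previously would have run through check.v1's own bench loop.
func BenchmarkCopyBuffer(b *testing.B) {
    src := make([]byte, 1<<20)
    dst := make([]byte, len(src))

    b.SetBytes(int64(len(src))) // reports throughput, like the removed mbPerSec helper
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        copy(dst, src)
    }
}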
- -func (r *Result) Add(other *Result) { - r.Succeeded += other.Succeeded - r.Skipped += other.Skipped - r.Failed += other.Failed - r.Panicked += other.Panicked - r.FixturePanicked += other.FixturePanicked - r.ExpectedFailures += other.ExpectedFailures - r.Missed += other.Missed - if r.WorkDir != "" && other.WorkDir != "" { - r.WorkDir += ":" + other.WorkDir - } else if other.WorkDir != "" { - r.WorkDir = other.WorkDir - } -} - -func (r *Result) Passed() bool { - return (r.Failed == 0 && r.Panicked == 0 && - r.FixturePanicked == 0 && r.Missed == 0 && - r.RunError == nil) -} - -func (r *Result) String() string { - if r.RunError != nil { - return "ERROR: " + r.RunError.Error() - } - - var value string - if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 && - r.Missed == 0 { - value = "OK: " - } else { - value = "OOPS: " - } - value += fmt.Sprintf("%d passed", r.Succeeded) - if r.Skipped != 0 { - value += fmt.Sprintf(", %d skipped", r.Skipped) - } - if r.ExpectedFailures != 0 { - value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures) - } - if r.Failed != 0 { - value += fmt.Sprintf(", %d FAILED", r.Failed) - } - if r.Panicked != 0 { - value += fmt.Sprintf(", %d PANICKED", r.Panicked) - } - if r.FixturePanicked != 0 { - value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked) - } - if r.Missed != 0 { - value += fmt.Sprintf(", %d MISSED", r.Missed) - } - if r.WorkDir != "" { - value += "\nWORK=" + r.WorkDir - } - return value -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 1a6884f9b7..344b976a70 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -380,12 +380,6 @@ github.com/klauspost/compress/zstd/internal/xxhash # github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 ## explicit github.com/klauspost/pgzip -# github.com/kr/pretty v0.3.0 -## explicit; go 1.12 -github.com/kr/pretty -# github.com/kr/text v0.2.0 -## explicit -github.com/kr/text # github.com/leodido/go-urn v1.2.1 ## explicit; go 1.13 github.com/leodido/go-urn @@ -473,9 +467,6 @@ github.com/proglottis/gpgme # github.com/rivo/uniseg v0.4.3 ## explicit; go 1.18 github.com/rivo/uniseg -# github.com/rogpeppe/go-internal v1.8.0 -## explicit; go 1.11 -github.com/rogpeppe/go-internal/fmtsort # github.com/russross/blackfriday v2.0.0+incompatible ## explicit # github.com/segmentio/ksuid v1.0.4 @@ -522,6 +513,7 @@ github.com/stefanberger/go-pkcs11uri ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require +github.com/stretchr/testify/suite # github.com/sylabs/sif/v2 v2.9.1 ## explicit; go 1.19 github.com/sylabs/sif/v2/pkg/sif @@ -765,9 +757,6 @@ google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/timestamppb -# gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c -## explicit; go 1.11 -gopkg.in/check.v1 # gopkg.in/go-jose/go-jose.v2 v2.6.1 ## explicit gopkg.in/go-jose/go-jose.v2