diff --git a/cmd/cache/main.go b/cmd/cache/main.go
index 26f128274..f94ed712b 100644
--- a/cmd/cache/main.go
+++ b/cmd/cache/main.go
@@ -31,26 +31,3 @@ func main() {
 		os.Exit(1) //nolint:gocritic
 	}
 }
-
-// package main
-//
-// import (
-// 	"log"
-//
-// 	"github.com/minio/minio-go"
-// )
-//
-// func main() {
-// 	endpoint := "play.minio.io:9000"
-// 	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
-// 	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
-// 	useSSL := true
-//
-// 	// Initialize minio client object.
-// 	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
-// 	if err != nil {
-// 		log.Fatalln(err)
-// 	}
-//
-// 	log.Printf("%#v\n", minioClient) // minioClient is now setup
-// }
diff --git a/internal/fetch/fetch.go b/internal/fetch/fetch.go
index 674c7a0ca..640589201 100644
--- a/internal/fetch/fetch.go
+++ b/internal/fetch/fetch.go
@@ -7,6 +7,7 @@ import (
 	"os"
 	"strings"
 
+	"github.com/openshift-pipelines/tekton-caches/internal/provider/gcs"
 	"github.com/openshift-pipelines/tekton-caches/internal/provider/oci"
 )
 
@@ -27,8 +28,8 @@ func Fetch(ctx context.Context, hash, target, folder string, insecure bool) erro
 		return oci.Fetch(ctx, hash, newTarget, folder, insecure)
 	case "s3":
 		return fmt.Errorf("s3 schema not (yet) supported: %s", target)
-	case "gcs":
-		return fmt.Errorf("gcs schema not (yet) supported: %s", target)
+	case "gs":
+		return gcs.Fetch(ctx, hash, newTarget, folder)
 	default:
 		return fmt.Errorf("unknown schema: %s", target)
 	}
diff --git a/internal/provider/gcs/common.go b/internal/provider/gcs/common.go
new file mode 100644
index 000000000..979f3b2d6
--- /dev/null
+++ b/internal/provider/gcs/common.go
@@ -0,0 +1,41 @@
+package gcs
+
+import (
+	"context"
+	"io"
+	"os"
+	"path/filepath"
+
+	"cloud.google.com/go/storage"
+)
+
+// realGCS is a wrapper over the GCS client functions.
+type realGCS struct {
+	client         *storage.Client
+	manifestObject string
+}
+
+func (gp realGCS) NewWriter(ctx context.Context, bucket, object string) io.WriteCloser {
+	if object != gp.manifestObject {
+		object = filepath.Join(".cache", object)
+	}
+	return gp.client.Bucket(bucket).Object(object).
+		If(storage.Conditions{DoesNotExist: true}). // Skip upload if already exists.
+		NewWriter(ctx)
+}
+
+func (gp realGCS) NewReader(ctx context.Context, bucket, object string) (io.ReadCloser, error) {
+	return gp.client.Bucket(bucket).Object(object).NewReader(ctx)
+}
+
+// realOS merely wraps the os package implementations.
+type realOS struct{}
+
+func (realOS) EvalSymlinks(path string) (string, error) { return filepath.EvalSymlinks(path) }
+func (realOS) Stat(path string) (os.FileInfo, error) { return os.Stat(path) }
+func (realOS) Rename(oldpath, newpath string) error { return os.Rename(oldpath, newpath) }
+func (realOS) Chmod(name string, mode os.FileMode) error { return os.Chmod(name, mode) }
+func (realOS) Create(name string) (*os.File, error) { return os.Create(name) }
+func (realOS) MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) }
+func (realOS) Open(name string) (*os.File, error) { return os.Open(name) }
+func (realOS) RemoveAll(path string) error { return os.RemoveAll(path) }
diff --git a/internal/provider/gcs/fetcher.go b/internal/provider/gcs/fetcher.go
new file mode 100644
index 000000000..2453dd667
--- /dev/null
+++ b/internal/provider/gcs/fetcher.go
@@ -0,0 +1,58 @@
+package gcs
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/storage"
+	"github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher/pkg/common"
+	"github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher/pkg/fetcher"
+	"google.golang.org/api/option"
+)
+
+const (
+	sourceType    = "Manifest"
+	stagingFolder = ".download/"
+	backoff       = 100 * time.Millisecond
+	retries       = 0
+)
+
+func Fetch(ctx context.Context, hash, target, folder string) error {
+	location := "gs://" + target + hash + ".json"
+	bucket, object, generation, err := common.ParseBucketObject(location)
+	if err != nil {
+		return fmt.Errorf("parsing location from %q failed: %w", location, err)
+	}
+	client, err := storage.NewClient(ctx, option.WithUserAgent(userAgent))
+	if err != nil {
+		return fmt.Errorf("failed to create a new gcs client: %w", err)
+	}
+	gcs := &fetcher.Fetcher{
+		GCS:         realGCS{client, object},
+		OS:          realOS{},
+		DestDir:     folder,
+		StagingDir:  filepath.Join(folder, stagingFolder),
+		CreatedDirs: map[string]bool{},
+		Bucket:      bucket,
+		Object:      object,
+		Generation:  generation,
+		TimeoutGCS:  true,
+		WorkerCount: workerCount,
+		Retries:     retries,
+		Backoff:     backoff,
+		SourceType:  sourceType,
+		KeepSource:  false,
+		Verbose:     false,
+		Stdout:      os.Stdout,
+		Stderr:      os.Stderr,
+	}
+	err = gcs.Fetch(ctx)
+	if err != nil && !strings.Contains(err.Error(), "storage: object doesn't exist") {
+		return fmt.Errorf("failed to fetch: %w", err)
+	}
+	return nil
+}
diff --git a/internal/provider/gcs/upload.go b/internal/provider/gcs/upload.go
new file mode 100644
index 000000000..551bb1f50
--- /dev/null
+++ b/internal/provider/gcs/upload.go
@@ -0,0 +1,68 @@
+package gcs
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"cloud.google.com/go/storage"
+	"github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher/pkg/common"
+	"github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher/pkg/uploader"
+	"google.golang.org/api/option"
+)
+
+const (
+	userAgent = "tekton-caches"
+	// The number of files to upload in parallel.
+	workerCount = 200
+)
+
+// gcs-uploader -dir examples/ -location gs://tekton-caches-tests/test
+
+func Upload(ctx context.Context, hash, target, folder string) error {
+	location := "gs://" + target + hash + ".json"
+	client, err := storage.NewClient(ctx, option.WithUserAgent(userAgent))
+	if err != nil {
+		return fmt.Errorf("failed to create a new gcs client: %w", err)
+	}
+	bucket, object, generation, err := common.ParseBucketObject(location)
+	if err != nil {
+		return fmt.Errorf("parsing location from %q failed: %w", location, err)
+	}
+	if generation != 0 {
+		return errors.New("cannot specify manifest file generation")
+	}
+	_, err = client.Bucket(bucket).Object(object).NewReader(ctx)
+	if err != nil && !errors.Is(err, storage.ErrObjectNotExist) {
+		return fmt.Errorf("failed to fetch the object: %w", err)
+	}
+	// if !errors.Is(err, storage.ErrObjectNotExist) {
+	// 	// Delete if the object already exists…
+	// 	// It's a workaround to not have the precondition failure…
+	// 	if err := client.Bucket(bucket).Object(object).Delete(ctx); err != nil {
+	// 		return err
+	// 	}
+	// }
+
+	u := uploader.New(ctx, realGCS{client, object}, realOS{}, bucket, object, workerCount)
+
+	if err := filepath.Walk(folder, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if info.IsDir() {
+			return nil
+		}
+
+		return u.Do(ctx, path, info)
+	}); err != nil {
+		return fmt.Errorf("failed to walk the path: %w", err)
+	}
+
+	if err := u.Done(ctx); err != nil {
+		return fmt.Errorf("failed to upload: %w", err)
+	}
+	return nil
+}
diff --git a/internal/upload/upload.go b/internal/upload/upload.go
index 9a2522729..2bd115371 100644
--- a/internal/upload/upload.go
+++ b/internal/upload/upload.go
@@ -6,6 +6,7 @@ import (
 	"net/url"
 	"strings"
 
+	"github.com/openshift-pipelines/tekton-caches/internal/provider/gcs"
 	"github.com/openshift-pipelines/tekton-caches/internal/provider/oci"
 )
 
@@ -20,8 +21,8 @@ func Upload(ctx context.Context, hash, target, folder string, insecure bool) err
 		return oci.Upload(ctx, hash, newTarget, folder, insecure)
 	case "s3":
 		return fmt.Errorf("s3 schema not (yet) supported: %s", target)
-	case "gcs":
-		return fmt.Errorf("gcs schema not (yet) supported: %s", target)
+	case "gs":
+		return gcs.Upload(ctx, hash, newTarget, folder)
 	default:
 		return fmt.Errorf("unknown schema: %s", target)
 	}
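For reference, a minimal sketch of how the new gs scheme is exercised through the existing fetch/upload entry points, from inside the module (the packages are internal). The bucket prefix, cache key, and local folder below are hypothetical; the insecure flag is only consumed by the oci provider.

package main

import (
	"context"
	"log"

	"github.com/openshift-pipelines/tekton-caches/internal/fetch"
	"github.com/openshift-pipelines/tekton-caches/internal/upload"
)

func main() {
	ctx := context.Background()
	// Hypothetical values: the target comes from the task's target param,
	// the hash from the computed cache key.
	target := "gs://tekton-caches-tests/test/"
	hash := "0123456789abcdef"
	folder := "/tmp/go-cache"

	// Upload the folder; the gcs provider writes a manifest named <prefix><hash>.json.
	if err := upload.Upload(ctx, hash, target, folder, false); err != nil {
		log.Fatal(err)
	}
	// Fetch it back; a missing manifest is treated as a cache miss, not an error.
	if err := fetch.Fetch(ctx, hash, target, folder, false); err != nil {
		log.Fatal(err)
	}
}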
diff --git a/tekton/cache-fetch.yaml b/tekton/cache-fetch.yaml
index e6f47c1c4..8fe9a3809 100644
--- a/tekton/cache-fetch.yaml
+++ b/tekton/cache-fetch.yaml
@@ -33,6 +33,11 @@ spec:
         Whether to use insecure mode for fetching the cache
       type: string
       default: "false"
+    - name: googleCredentialsPath
+      description: |
+        The path to the Google credentials file. If left empty, it is ignored.
+      type: string
+      default: ""
   results: # Any result to "publish" ?
     - name: fetched
       description: |
@@ -46,6 +51,8 @@
       value: $(params.workingdir)
     - name: PARAM_INSECURE
       value: $(params.insecure)
+    - name: GOOGLE_APPLICATION_CREDENTIALS
+      value: $(params.googleCredentialsPath)
   # FIXME: use a released version once something is released :)
   image: ghcr.io/openshift-pipelines/tekton-caches/cache:latest
   args: ["$(params.patterns[*])"]
diff --git a/tekton/cache-upload.yaml b/tekton/cache-upload.yaml
index b5c109d7e..4f8c70785 100644
--- a/tekton/cache-upload.yaml
+++ b/tekton/cache-upload.yaml
@@ -43,6 +43,11 @@ spec:
         Whether to force the cache upload even if it was fetched previously
       type: string
       default: "false"
+    - name: googleCredentialsPath
+      description: |
+        The path to the Google credentials file. If left empty, it is ignored.
+      type: string
+      default: ""
   env:
     - name: PARAM_TARGET
       value: $(params.target)
@@ -56,6 +61,8 @@
       value: $(params.fetched)
    - name: PARAM_FORCE_CACHE_UPLOAD
      value: $(params.force-cache-upload)
+    - name: GOOGLE_APPLICATION_CREDENTIALS
+      value: $(params.googleCredentialsPath)
  # FIXME: use a released version once something is released :)
  image: ghcr.io/openshift-pipelines/tekton-caches/cache:latest
  args: ["$(params.patterns[*])"]
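On the task side, the only new wiring is the GOOGLE_APPLICATION_CREDENTIALS environment variable fed from the googleCredentialsPath param: storage.NewClient resolves Application Default Credentials, which pick up that variable when it is set, so the gcs provider itself needs no extra configuration. A minimal sketch of that behaviour, with a hypothetical credentials path such as a workspace mount:

package main

import (
	"context"
	"log"
	"os"

	"cloud.google.com/go/storage"
	"google.golang.org/api/option"
)

func main() {
	// Hypothetical path; in the tasks this value comes from $(params.googleCredentialsPath).
	os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", "/workspace/creds/service-account.json")

	ctx := context.Background()
	// Same construction as the gcs provider: Application Default Credentials read
	// GOOGLE_APPLICATION_CREDENTIALS when set, otherwise fall back to the ambient
	// environment (for example workload identity on GKE).
	client, err := storage.NewClient(ctx, option.WithUserAgent("tekton-caches"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}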