diff --git a/cdnsystem/config/config.go b/cdnsystem/config/config.go index 828e96754..675690e9e 100644 --- a/cdnsystem/config/config.go +++ b/cdnsystem/config/config.go @@ -17,10 +17,14 @@ package config import ( - "fmt" - "io/ioutil" "time" + "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr/cdn/storage" + "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr/cdn/storage/disk" + "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr/cdn/storage/hybrid" + "d7y.io/dragonfly/v2/cdnsystem/plugins" + "d7y.io/dragonfly/v2/cdnsystem/storedriver" + "d7y.io/dragonfly/v2/cdnsystem/storedriver/local" "d7y.io/dragonfly/v2/cmd/dependency/base" "d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/util/net/iputils" @@ -39,22 +43,9 @@ func New() *Config { type Config struct { base.Options `yaml:",inline" mapstructure:",squash"` *BaseProperties `yaml:"base" mapstructure:"base"` - Plugins map[PluginType][]*PluginProperties `yaml:"plugins" mapstructure:"plugins"` - ConfigServer string `yaml:"configServer" mapstructure:"configServer"` -} - -// Load loads config properties from the giving file. -func (c *Config) Load(path string) error { - content, err := ioutil.ReadFile(path) - if err != nil { - return fmt.Errorf("failed to load yaml %s when reading file: %v", path, err) - } - if err = yaml.Unmarshal(content, c); err != nil { - return fmt.Errorf("failed to load yaml %s: %v", path, err) - } - - return nil + Plugins map[plugins.PluginType][]*plugins.PluginProperties `yaml:"plugins" mapstructure:"plugins"` + ConfigServer string `yaml:"configServer" mapstructure:"configServer"` } func (c *Config) String() string { @@ -64,32 +55,63 @@ func (c *Config) String() string { return "" } -// NewDefaultPlugins creates a Plugins instant with default values. -func NewDefaultPlugins() map[PluginType][]*PluginProperties { - return map[PluginType][]*PluginProperties{ - StoragePlugin: { +// NewDefaultPlugins creates plugin instants with default values. 
+func NewDefaultPlugins() map[plugins.PluginType][]*plugins.PluginProperties { + return map[plugins.PluginType][]*plugins.PluginProperties{ + plugins.StorageDriverPlugin: { { - Name: "disk", + Name: local.DiskDriverName, Enable: true, - Config: map[string]interface{}{ - "baseDir": "/tmp/cdnsystem", - "gcConfig": map[string]interface{}{ - "youngGCThreshold": "100G", - "fullGCThreshold": "5G", - "cleanRatio": 1, - "intervalThreshold": "2h", - }, + Config: &storedriver.Config{ + BaseDir: DefaultDiskBaseDir, }, }, { - Name: "memory", + Name: local.MemoryDriverName, + Enable: true, + Config: &storedriver.Config{ + BaseDir: DefaultMemoryBaseDir, + }, + }, + }, plugins.StorageManagerPlugin: { + { + Name: disk.StorageMode, Enable: true, - Config: map[string]interface{}{ - "baseDir": "/tmp/memory/dragonfly", - "gcConfig": map[string]interface{}{ - "youngGCThreshold": "100G", - "fullGCThreshold": "5G", - "cleanRatio": 3, - "intervalThreshold": "2h", + Config: &storage.Config{ + GCInitialDelay: 0 * time.Second, + GCInterval: 15 * time.Second, + DriverConfigs: map[string]*storage.DriverConfig{ + local.DiskDriverName: { + GCConfig: &storage.GCConfig{ + YoungGCThreshold: 100 * unit.GB, + FullGCThreshold: 5 * unit.GB, + CleanRatio: 1, + IntervalThreshold: 2 * time.Hour, + }}, + }, + }, + }, { + Name: hybrid.StorageMode, + Enable: false, + Config: &storage.Config{ + GCInitialDelay: 0 * time.Second, + GCInterval: 15 * time.Second, + DriverConfigs: map[string]*storage.DriverConfig{ + local.DiskDriverName: { + GCConfig: &storage.GCConfig{ + YoungGCThreshold: 100 * unit.GB, + FullGCThreshold: 5 * unit.GB, + CleanRatio: 1, + IntervalThreshold: 2 * time.Hour, + }, + }, + local.MemoryDriverName: { + GCConfig: &storage.GCConfig{ + YoungGCThreshold: 100 * unit.GB, + FullGCThreshold: 5 * unit.GB, + CleanRatio: 3, + IntervalThreshold: 2 * time.Hour, + }, + }, }, }, }, @@ -107,9 +129,8 @@ func NewDefaultBaseProperties() *BaseProperties { FailAccessInterval: DefaultFailAccessInterval, 
GCInitialDelay: DefaultGCInitialDelay, GCMetaInterval: DefaultGCMetaInterval, - GCStorageInterval: DefaultGCStorageInterval, TaskExpireTime: DefaultTaskExpireTime, - StoragePattern: DefaultStoragePattern, + StorageMode: DefaultStorageMode, AdvertiseIP: iputils.HostIP, } } @@ -150,15 +171,11 @@ type BaseProperties struct { // default: 2min GCMetaInterval time.Duration `yaml:"gcMetaInterval" mapstructure:"gcMetaInterval"` - // GCStorageInterval is the interval time to execute GC storage. - // default: 15s - GCStorageInterval time.Duration `yaml:"gcStorageInterval" mapstructure:"gcStorageInterval"` - // TaskExpireTime when a task is not accessed within the taskExpireTime, // and it will be treated to be expired. // default: 3min TaskExpireTime time.Duration `yaml:"taskExpireTime" mapstructure:"taskExpireTime"` - // StoragePattern disk/hybrid/memory - StoragePattern string `yaml:"storagePattern" mapstructure:"storagePattern"` + // StorageMode disk/hybrid/memory + StorageMode string `yaml:"storageMode" mapstructure:"storageMode"` } diff --git a/cdnsystem/config/constants.go b/cdnsystem/config/constants.go index 7831f7f62..f62a15190 100644 --- a/cdnsystem/config/constants.go +++ b/cdnsystem/config/constants.go @@ -34,7 +34,7 @@ const ( ) const ( - DefaultStoragePattern = "disk" + DefaultStorageMode = "disk" ) const ( @@ -58,8 +58,6 @@ const ( // DefaultGCMetaInterval is the interval time to execute the GC meta. DefaultGCMetaInterval = 2 * time.Minute - DefaultGCStorageInterval = 15 * time.Second - // DefaultTaskExpireTime when a task is not accessed within the taskExpireTime, // and it will be treated to be expired. 
DefaultTaskExpireTime = 3 * time.Minute diff --git a/cdnsystem/config/store_path.go b/cdnsystem/config/store_path.go new file mode 100644 index 000000000..83160a1e7 --- /dev/null +++ b/cdnsystem/config/store_path.go @@ -0,0 +1,29 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package config + +import ( + "path/filepath" + + "d7y.io/dragonfly/v2/pkg/basic" +) + +var ( + DefaultDiskBaseDir = filepath.Join(basic.HomeDir, "ftp") + + DefaultMemoryBaseDir = "/dev/shm/dragonfly" +) diff --git a/cdnsystem/config/testdata/config/cdn.yaml b/cdnsystem/config/testdata/config/cdn.yaml new file mode 100644 index 000000000..6679ed4db --- /dev/null +++ b/cdnsystem/config/testdata/config/cdn.yaml @@ -0,0 +1,94 @@ +--- +base: + # listenPort is the port cdn server listens on. + # default: 8003 + listenPort: 8003 + + # DownloadPort is the port for download files from cdn. + # And you should start a file server firstly which listens on the download port. + # default: 8001 + downloadPort: 8001 + + # SystemReservedBandwidth is the network bandwidth reserved for system software. + # default: 20 MB, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte. + systemReservedBandwidth: 20M + + # MaxBandwidth is the network bandwidth that cdn can use. + # default: 1G, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte. 
+ maxBandwidth: 1G + + # FailAccessInterval is the interval time after failed to access the URL. + # If a task failed to be downloaded from the source, it will not be retried in the time since the last failure. + # default: 3m + failAccessInterval: 3m + + # GCInitialDelay is the delay time from the start to the first GC execution. + # default: 6s + gcInitialDelay: 6s + + # GCMetaInterval is the interval time to execute GC meta. + # default: 2m0s + gcMetaInterval: 2m + + # TaskExpireTime when a task is not accessed within the taskExpireTime, + # and it will be treated to be expired. + # default: 3m0s + taskExpireTime: 3m + + # storageMode is the mode of storage policy, [disk/hybrid] + storageMode: disk + +plugins: + storageDriver: + - name: disk + enable: true + config: + baseDir: /tmp/cdnsystem2 + - name: memory + enable: true + config: + baseDir: /tmp/memory/dragonfly + + storageManager: + - name: disk + enable: true + config: + gcInitialDelay: 5s + gcInterval: 15s + driverConfigs: + disk: + gcConfig: + youngGCThreshold: 100G + fullGCThreshold: 5G + cleanRatio: 1 + intervalThreshold: 2h + - name: hybrid + enable: false + config: + gcInitialDelay: 5s + gcInterval: 15s + driverConfigs: + disk: + gcConfig: + youngGCThreshold: 100G + fullGCThreshold: 5G + cleanRatio: 1 + intervalThreshold: 2h + memory: + gcConfig: + youngGCThreshold: 100G + fullGCThreshold: 5G + cleanRatio: 3 + intervalThreshold: 2h + +# Console shows log on console +# default: false +console: false + +# Whether to enable debug level logger and enable pprof +# default: false +verbose: false + +# listen port for pprof, only valid when the verbose option is true +# default is random port +pprofPort: 0 \ No newline at end of file diff --git a/cdnsystem/daemon/mgr/cdn/cache_data_mgr.go b/cdnsystem/daemon/mgr/cdn/cache_data_mgr.go index d72d17676..d8d9533f1 100644 --- a/cdnsystem/daemon/mgr/cdn/cache_data_mgr.go +++ b/cdnsystem/daemon/mgr/cdn/cache_data_mgr.go @@ -18,7 +18,6 @@ package cdn import ( "bytes" 
- "context" "fmt" "io" "time" @@ -47,7 +46,7 @@ func newCacheDataManager(storeMgr storage.Manager) *cacheDataManager { } // writeFileMetaDataByTask stores the metadata of task by task to storage. -func (mm *cacheDataManager) writeFileMetaDataByTask(ctx context.Context, task *types.SeedTask) (*storage.FileMetaData, error) { +func (mm *cacheDataManager) writeFileMetaDataByTask(task *types.SeedTask) (*storage.FileMetaData, error) { mm.cacheLocker.Lock(task.TaskID, false) defer mm.cacheLocker.UnLock(task.TaskID, false) metaData := &storage.FileMetaData{ @@ -60,7 +59,7 @@ func (mm *cacheDataManager) writeFileMetaDataByTask(ctx context.Context, task *t TotalPieceCount: task.PieceTotal, } - if err := mm.storage.WriteFileMetaData(ctx, task.TaskID, metaData); err != nil { + if err := mm.storage.WriteFileMetaData(task.TaskID, metaData); err != nil { return nil, errors.Wrapf(err, "failed to write file metadata to storage") } @@ -68,11 +67,11 @@ func (mm *cacheDataManager) writeFileMetaDataByTask(ctx context.Context, task *t } // updateAccessTime update access and interval -func (mm *cacheDataManager) updateAccessTime(ctx context.Context, taskID string, accessTime int64) error { +func (mm *cacheDataManager) updateAccessTime(taskID string, accessTime int64) error { mm.cacheLocker.Lock(taskID, false) defer mm.cacheLocker.UnLock(taskID, false) - originMetaData, err := mm.readFileMetaData(ctx, taskID) + originMetaData, err := mm.readFileMetaData(taskID) if err != nil { return err } @@ -86,28 +85,28 @@ func (mm *cacheDataManager) updateAccessTime(ctx context.Context, taskID string, originMetaData.AccessTime = accessTime - return mm.storage.WriteFileMetaData(ctx, taskID, originMetaData) + return mm.storage.WriteFileMetaData(taskID, originMetaData) } -func (mm *cacheDataManager) updateExpireInfo(ctx context.Context, taskID string, expireInfo map[string]string) error { +func (mm *cacheDataManager) updateExpireInfo(taskID string, expireInfo map[string]string) error { 
mm.cacheLocker.Lock(taskID, false) defer mm.cacheLocker.UnLock(taskID, false) - originMetaData, err := mm.readFileMetaData(ctx, taskID) + originMetaData, err := mm.readFileMetaData(taskID) if err != nil { return err } originMetaData.ExpireInfo = expireInfo - return mm.storage.WriteFileMetaData(ctx, taskID, originMetaData) + return mm.storage.WriteFileMetaData(taskID, originMetaData) } -func (mm *cacheDataManager) updateStatusAndResult(ctx context.Context, taskID string, metaData *storage.FileMetaData) error { +func (mm *cacheDataManager) updateStatusAndResult(taskID string, metaData *storage.FileMetaData) error { mm.cacheLocker.Lock(taskID, false) defer mm.cacheLocker.UnLock(taskID, false) - originMetaData, err := mm.readFileMetaData(ctx, taskID) + originMetaData, err := mm.readFileMetaData(taskID) if err != nil { return err } @@ -127,30 +126,30 @@ func (mm *cacheDataManager) updateStatusAndResult(ctx context.Context, taskID st originMetaData.PieceMd5Sign = metaData.PieceMd5Sign } } - return mm.storage.WriteFileMetaData(ctx, taskID, originMetaData) + return mm.storage.WriteFileMetaData(taskID, originMetaData) } // appendPieceMetaData append piece meta info to storage -func (mm *cacheDataManager) appendPieceMetaData(ctx context.Context, taskID string, record *storage.PieceMetaRecord) error { +func (mm *cacheDataManager) appendPieceMetaData(taskID string, record *storage.PieceMetaRecord) error { mm.cacheLocker.Lock(taskID, false) defer mm.cacheLocker.UnLock(taskID, false) // write to the storage - return mm.storage.AppendPieceMetaData(ctx, taskID, record) + return mm.storage.AppendPieceMetaData(taskID, record) } // appendPieceMetaData append piece meta info to storage -func (mm *cacheDataManager) writePieceMetaRecords(ctx context.Context, taskID string, records []*storage.PieceMetaRecord) error { +func (mm *cacheDataManager) writePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error { mm.cacheLocker.Lock(taskID, false) defer 
mm.cacheLocker.UnLock(taskID, false) // write to the storage - return mm.storage.WritePieceMetaRecords(ctx, taskID, records) + return mm.storage.WritePieceMetaRecords(taskID, records) } // readAndCheckPieceMetaRecords reads pieceMetaRecords from storage and check data integrity by the md5 file of the TaskId -func (mm *cacheDataManager) readAndCheckPieceMetaRecords(ctx context.Context, taskID, pieceMd5Sign string) ([]*storage.PieceMetaRecord, error) { +func (mm *cacheDataManager) readAndCheckPieceMetaRecords(taskID, pieceMd5Sign string) ([]*storage.PieceMetaRecord, error) { mm.cacheLocker.Lock(taskID, true) defer mm.cacheLocker.UnLock(taskID, true) - md5Sign, pieceMetaRecords, err := mm.getPieceMd5Sign(ctx, taskID) + md5Sign, pieceMetaRecords, err := mm.getPieceMd5Sign(taskID) if err != nil { return nil, err } @@ -162,14 +161,14 @@ func (mm *cacheDataManager) readAndCheckPieceMetaRecords(ctx context.Context, ta } // readPieceMetaRecords reads pieceMetaRecords from storage and without check data integrity -func (mm *cacheDataManager) readPieceMetaRecords(ctx context.Context, taskID string) ([]*storage.PieceMetaRecord, error) { +func (mm *cacheDataManager) readPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) { mm.cacheLocker.Lock(taskID, true) defer mm.cacheLocker.UnLock(taskID, true) - return mm.storage.ReadPieceMetaRecords(ctx, taskID) + return mm.storage.ReadPieceMetaRecords(taskID) } -func (mm *cacheDataManager) getPieceMd5Sign(ctx context.Context, taskID string) (string, []*storage.PieceMetaRecord, error) { - pieceMetaRecords, err := mm.storage.ReadPieceMetaRecords(ctx, taskID) +func (mm *cacheDataManager) getPieceMd5Sign(taskID string) (string, []*storage.PieceMetaRecord, error) { + pieceMetaRecords, err := mm.storage.ReadPieceMetaRecords(taskID) if err != nil { return "", nil, errors.Wrapf(err, "failed to read piece meta file") } @@ -180,28 +179,28 @@ func (mm *cacheDataManager) getPieceMd5Sign(ctx context.Context, taskID string) return 
digestutils.Sha256(pieceMd5...), pieceMetaRecords, nil } -func (mm *cacheDataManager) readFileMetaData(ctx context.Context, taskID string) (*storage.FileMetaData, error) { - fileMeta, err := mm.storage.ReadFileMetaData(ctx, taskID) +func (mm *cacheDataManager) readFileMetaData(taskID string) (*storage.FileMetaData, error) { + fileMeta, err := mm.storage.ReadFileMetaData(taskID) if err != nil { return nil, errors.Wrapf(err, "failed to read file metadata from storage") } return fileMeta, nil } -func (mm *cacheDataManager) statDownloadFile(ctx context.Context, taskID string) (*storedriver.StorageInfo, error) { - return mm.storage.StatDownloadFile(ctx, taskID) +func (mm *cacheDataManager) statDownloadFile(taskID string) (*storedriver.StorageInfo, error) { + return mm.storage.StatDownloadFile(taskID) } -func (mm *cacheDataManager) readDownloadFile(ctx context.Context, taskID string) (io.ReadCloser, error) { - return mm.storage.ReadDownloadFile(ctx, taskID) +func (mm *cacheDataManager) readDownloadFile(taskID string) (io.ReadCloser, error) { + return mm.storage.ReadDownloadFile(taskID) } -func (mm *cacheDataManager) resetRepo(ctx context.Context, task *types.SeedTask) error { +func (mm *cacheDataManager) resetRepo(task *types.SeedTask) error { mm.cacheLocker.Lock(task.TaskID, false) defer mm.cacheLocker.UnLock(task.TaskID, false) - return mm.storage.ResetRepo(ctx, task) + return mm.storage.ResetRepo(task) } -func (mm *cacheDataManager) writeDownloadFile(ctx context.Context, taskID string, offset int64, len int64, buf *bytes.Buffer) error { - return mm.storage.WriteDownloadFile(ctx, taskID, offset, len, buf) +func (mm *cacheDataManager) writeDownloadFile(taskID string, offset int64, len int64, buf *bytes.Buffer) error { + return mm.storage.WriteDownloadFile(taskID, offset, len, buf) } diff --git a/cdnsystem/daemon/mgr/cdn/cache_detector.go b/cdnsystem/daemon/mgr/cdn/cache_detector.go index 165b9f811..1fbcecea3 100644 --- a/cdnsystem/daemon/mgr/cdn/cache_detector.go +++ 
b/cdnsystem/daemon/mgr/cdn/cache_detector.go @@ -17,7 +17,6 @@ package cdn import ( - "context" "crypto/md5" "fmt" "hash" @@ -58,15 +57,15 @@ func newCacheDetector(cacheDataManager *cacheDataManager, resourceClient source. } } -func (cd *cacheDetector) detectCache(ctx context.Context, task *types.SeedTask) (*cacheResult, error) { +func (cd *cacheDetector) detectCache(task *types.SeedTask) (*cacheResult, error) { //err := cd.cacheStore.CreateUploadLink(ctx, task.TaskId) //if err != nil { // return nil, errors.Wrapf(err, "failed to create upload symbolic link") //} - result, err := cd.doDetect(ctx, task) + result, err := cd.doDetect(task) if err != nil { logger.WithTaskID(task.TaskID).Infof("failed to detect cache, reset cache: %v", err) - metaData, err := cd.resetCache(ctx, task) + metaData, err := cd.resetCache(task) if err == nil { result = &cacheResult{ fileMetaData: metaData, @@ -75,15 +74,15 @@ func (cd *cacheDetector) detectCache(ctx context.Context, task *types.SeedTask) } return result, err } - if err := cd.cacheDataManager.updateAccessTime(ctx, task.TaskID, getCurrentTimeMillisFunc()); err != nil { + if err := cd.cacheDataManager.updateAccessTime(task.TaskID, getCurrentTimeMillisFunc()); err != nil { logger.WithTaskID(task.TaskID).Warnf("failed to update task access time ") } return result, nil } // detectCache the actual detect action which detects file metaData and pieces metaData of specific task -func (cd *cacheDetector) doDetect(ctx context.Context, task *types.SeedTask) (result *cacheResult, err error) { - fileMetaData, err := cd.cacheDataManager.readFileMetaData(ctx, task.TaskID) +func (cd *cacheDetector) doDetect(task *types.SeedTask) (result *cacheResult, err error) { + fileMetaData, err := cd.cacheDataManager.readFileMetaData(task.TaskID) if err != nil { return nil, errors.Wrapf(err, "failed to read file meta data") } @@ -103,7 +102,7 @@ func (cd *cacheDetector) doDetect(ctx context.Context, task *types.SeedTask) (re // not expired if 
fileMetaData.Finish { // quickly detect the cache situation through the meta data - return cd.parseByReadMetaFile(ctx, task.TaskID, fileMetaData) + return cd.parseByReadMetaFile(task.TaskID, fileMetaData) } // check if the resource supports range request. if so, // detect the cache situation by reading piece meta and data file @@ -114,16 +113,15 @@ func (cd *cacheDetector) doDetect(ctx context.Context, task *types.SeedTask) (re if !supportRange { return nil, errors.Wrapf(cdnerrors.ErrResourceNotSupportRangeRequest, "url:%s", task.URL) } - return cd.parseByReadFile(ctx, task.TaskID, fileMetaData) + return cd.parseByReadFile(task.TaskID, fileMetaData) } // parseByReadMetaFile detect cache by read meta and pieceMeta files of task -func (cd *cacheDetector) parseByReadMetaFile(ctx context.Context, taskID string, - fileMetaData *storage.FileMetaData) (*cacheResult, error) { +func (cd *cacheDetector) parseByReadMetaFile(taskID string, fileMetaData *storage.FileMetaData) (*cacheResult, error) { if !fileMetaData.Success { return nil, errors.Wrapf(cdnerrors.ErrDownloadFail, "success flag of download is false") } - pieceMetaRecords, err := cd.cacheDataManager.readAndCheckPieceMetaRecords(ctx, taskID, fileMetaData.PieceMd5Sign) + pieceMetaRecords, err := cd.cacheDataManager.readAndCheckPieceMetaRecords(taskID, fileMetaData.PieceMd5Sign) if err != nil { return nil, errors.Wrapf(err, "failed to check piece meta integrity") } @@ -131,7 +129,7 @@ func (cd *cacheDetector) parseByReadMetaFile(ctx context.Context, taskID string, return nil, errors.Wrapf(cdnerrors.ErrPieceCountNotEqual, "piece file piece count(%d), "+ "meta file piece count(%d)", len(pieceMetaRecords), fileMetaData.TotalPieceCount) } - storageInfo, err := cd.cacheDataManager.statDownloadFile(ctx, taskID) + storageInfo, err := cd.cacheDataManager.statDownloadFile(taskID) if err != nil { return nil, errors.Wrapf(err, "failed to get cdn file length") } @@ -149,13 +147,13 @@ func (cd *cacheDetector) 
parseByReadMetaFile(ctx context.Context, taskID string, } // parseByReadFile detect cache by read pieceMeta and data files of task -func (cd *cacheDetector) parseByReadFile(ctx context.Context, taskID string, metaData *storage.FileMetaData) (*cacheResult, error) { - reader, err := cd.cacheDataManager.readDownloadFile(ctx, taskID) +func (cd *cacheDetector) parseByReadFile(taskID string, metaData *storage.FileMetaData) (*cacheResult, error) { + reader, err := cd.cacheDataManager.readDownloadFile(taskID) if err != nil { return nil, errors.Wrapf(err, "failed to read data file") } defer reader.Close() - tempRecords, err := cd.cacheDataManager.readPieceMetaRecords(ctx, taskID) + tempRecords, err := cd.cacheDataManager.readPieceMetaRecords(taskID) if err != nil { return nil, errors.Wrapf(err, "parseByReadFile:failed to read piece meta file") } @@ -181,7 +179,7 @@ func (cd *cacheDetector) parseByReadFile(ctx context.Context, taskID string, met pieceMetaRecords = append(pieceMetaRecords, tempRecords[index]) } if len(tempRecords) != len(pieceMetaRecords) { - if err := cd.cacheDataManager.writePieceMetaRecords(ctx, taskID, pieceMetaRecords); err != nil { + if err := cd.cacheDataManager.writePieceMetaRecords(taskID, pieceMetaRecords); err != nil { return nil, errors.Wrapf(err, "write piece meta records failed") } } @@ -204,11 +202,11 @@ func (cd *cacheDetector) parseByReadFile(ctx context.Context, taskID string, met } // resetCache -func (cd *cacheDetector) resetCache(ctx context.Context, task *types.SeedTask) (*storage.FileMetaData, error) { - err := cd.cacheDataManager.resetRepo(ctx, task) +func (cd *cacheDetector) resetCache(task *types.SeedTask) (*storage.FileMetaData, error) { + err := cd.cacheDataManager.resetRepo(task) if err != nil { return nil, err } // initialize meta data file - return cd.cacheDataManager.writeFileMetaDataByTask(ctx, task) + return cd.cacheDataManager.writeFileMetaDataByTask(task) } diff --git a/cdnsystem/daemon/mgr/cdn/cache_writer.go 
b/cdnsystem/daemon/mgr/cdn/cache_writer.go index 3649c84c8..070273686 100644 --- a/cdnsystem/daemon/mgr/cdn/cache_writer.go +++ b/cdnsystem/daemon/mgr/cdn/cache_writer.go @@ -127,12 +127,12 @@ func (cw *cacheWriter) startWriter(ctx context.Context, reader io.Reader, task * close(jobCh) wg.Wait() - storageInfo, err := cw.cacheDataManager.statDownloadFile(ctx, task.TaskID) + storageInfo, err := cw.cacheDataManager.statDownloadFile(task.TaskID) if err != nil { return &downloadMetadata{backSourceLength: backSourceFileLength}, errors.Wrapf(err, "failed to get cdn file length") } - pieceMd5Sign, _, err := cw.cacheDataManager.getPieceMd5Sign(ctx, task.TaskID) + pieceMd5Sign, _, err := cw.cacheDataManager.getPieceMd5Sign(task.TaskID) if err != nil { return &downloadMetadata{backSourceLength: backSourceFileLength}, errors.Wrapf(err, "failed to get piece md5 sign") } diff --git a/cdnsystem/daemon/mgr/cdn/cache_writer_util.go b/cdnsystem/daemon/mgr/cdn/cache_writer_util.go index b0112a77f..3855d3868 100644 --- a/cdnsystem/daemon/mgr/cdn/cache_writer_util.go +++ b/cdnsystem/daemon/mgr/cdn/cache_writer_util.go @@ -65,11 +65,11 @@ func (cw *cacheWriter) writerPool(ctx context.Context, wg *sync.WaitGroup, write // todo 后续压缩等特性通过waitToWriteContent 和 pieceStyle 实现 waitToWriteContent := job.pieceContent // 要写盘数据的长度 - originPieceLen := waitToWriteContent.Len() // 未作处理的原始数据长度 - pieceLen := originPieceLen // 经过处理后写到存储介质的真实长度 + originPieceLen := waitToWriteContent.Len() // the length of the original data that has not been processed + pieceLen := originPieceLen // the real length written to the storage medium after processing pieceStyle := types.PlainUnspecified - if err := cw.writeToFile(ctx, job.TaskID, waitToWriteContent, int64(job.pieceNum)*int64(job.pieceSize), pieceMd5); err != nil { + if err := cw.writeToFile(job.TaskID, waitToWriteContent, int64(job.pieceNum)*int64(job.pieceSize), pieceMd5); err != nil { logger.WithTaskID(job.TaskID).Errorf("failed to write file, pieceNum %d: 
%v", job.pieceNum, err) // todo redo the job? continue @@ -94,8 +94,7 @@ func (cw *cacheWriter) writerPool(ctx context.Context, wg *sync.WaitGroup, write // write piece meta to storage go func(record *storage.PieceMetaRecord) { defer wg.Done() - // todo 可以先塞入channel,然后启动单独goroutine顺序写文件 - if err := cw.cacheDataManager.appendPieceMetaData(ctx, job.TaskID, record); err != nil { + if err := cw.cacheDataManager.appendPieceMetaData(job.TaskID, record); err != nil { logger.WithTaskID(job.TaskID).Errorf("failed to append piece meta data to file:%v", err) } }(pieceRecord) @@ -114,7 +113,7 @@ func (cw *cacheWriter) writerPool(ctx context.Context, wg *sync.WaitGroup, write } // writeToFile -func (cw *cacheWriter) writeToFile(ctx context.Context, taskID string, bytesBuffer *bytes.Buffer, offset int64, pieceMd5 hash.Hash) error { +func (cw *cacheWriter) writeToFile(taskID string, bytesBuffer *bytes.Buffer, offset int64, pieceMd5 hash.Hash) error { var resultBuf = &bytes.Buffer{} // write piece content var pieceContent []byte @@ -135,5 +134,5 @@ func (cw *cacheWriter) writeToFile(ctx context.Context, taskID string, bytesBuff } } // write to the storage - return cw.cacheDataManager.writeDownloadFile(ctx, taskID, offset, int64(pieceContLen), resultBuf) + return cw.cacheDataManager.writeDownloadFile(taskID, offset, int64(pieceContLen), resultBuf) } diff --git a/cdnsystem/daemon/mgr/cdn/manager.go b/cdnsystem/daemon/mgr/cdn/manager.go index cc2a8aef4..ba53c8a93 100644 --- a/cdnsystem/daemon/mgr/cdn/manager.go +++ b/cdnsystem/daemon/mgr/cdn/manager.go @@ -84,7 +84,7 @@ func (cm *Manager) TriggerCDN(ctx context.Context, task *types.SeedTask) (seedTa cm.cdnLocker.Lock(task.TaskID, false) defer cm.cdnLocker.UnLock(task.TaskID, false) // first: detect Cache - detectResult, err := cm.detector.detectCache(ctx, task) + detectResult, err := cm.detector.detectCache(task) if err != nil { return getUpdateTaskInfoWithStatusOnly(types.TaskInfoCdnStatusFailed), errors.Wrapf(err, "failed to detect 
cache") } @@ -139,7 +139,7 @@ func (cm *Manager) TriggerCDN(ctx context.Context, task *types.SeedTask) (seedTa } func (cm *Manager) Delete(ctx context.Context, taskID string) error { - err := cm.cacheStore.DeleteTask(ctx, taskID) + err := cm.cacheStore.DeleteTask(taskID) if err != nil { return errors.Wrap(err, "failed to delete task files") } @@ -177,7 +177,7 @@ func (cm *Manager) handleCDNResult(ctx context.Context, task *types.SeedTask, so if !isSuccess { cdnFileLength = 0 } - if err := cm.cacheDataManager.updateStatusAndResult(ctx, task.TaskID, &storage.FileMetaData{ + if err := cm.cacheDataManager.updateStatusAndResult(task.TaskID, &storage.FileMetaData{ Finish: true, Success: isSuccess, SourceRealMd5: sourceMd5, @@ -199,7 +199,7 @@ func (cm *Manager) handleCDNResult(ctx context.Context, task *types.SeedTask, so } func (cm *Manager) updateExpireInfo(ctx context.Context, taskID string, expireInfo map[string]string) { - if err := cm.cacheDataManager.updateExpireInfo(ctx, taskID, expireInfo); err != nil { + if err := cm.cacheDataManager.updateExpireInfo(taskID, expireInfo); err != nil { logger.WithTaskID(taskID).Errorf("failed to update expireInfo(%s): %v", expireInfo, err) } logger.WithTaskID(taskID).Infof("success to update expireInfo(%s)", expireInfo) diff --git a/cdnsystem/daemon/mgr/cdn/storage/disk/disk.go b/cdnsystem/daemon/mgr/cdn/storage/disk/disk.go index d32f1e902..cbe7de411 100644 --- a/cdnsystem/daemon/mgr/cdn/storage/disk/disk.go +++ b/cdnsystem/daemon/mgr/cdn/storage/disk/disk.go @@ -20,12 +20,12 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "strings" "time" "d7y.io/dragonfly/v2/cdnsystem/cdnerrors" - "d7y.io/dragonfly/v2/cdnsystem/config" "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr" "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr/cdn/storage" "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr/gc" @@ -40,44 +40,41 @@ import ( "github.com/sirupsen/logrus" ) -const name = "disk" +const StorageMode = "disk" func init() { - var builder *diskBuilder = nil - 
var _ storage.Builder = builder - var diskStorage *diskStorageMgr = nil var _ storage.Manager = diskStorage var _ gc.Executor = diskStorage + storage.Register(StorageMode, NewStorageManager) } -type diskBuilder struct { -} - -func (*diskBuilder) Build(cfg *config.Config) (storage.Manager, error) { - diskStore, err := storedriver.Get(local.StorageDriver) +func NewStorageManager(cfg *storage.Config) (storage.Manager, error) { + if len(cfg.DriverConfigs) != 1 { + return nil, fmt.Errorf("disk storage manager should have only one disk driver, cfg's driver number is wrong config: %v", cfg) + } + diskDriver, err := storedriver.Get(local.DiskDriverName) if err != nil { - return nil, err + return nil, fmt.Errorf("find disk driver for disk storage manager failed, config parameter is %#v: %v", cfg, err) } + storageMgr := &diskStorageMgr{ - diskStore: diskStore, + cfg: cfg, + diskDriver: diskDriver, } - gc.Register("diskStorage", cfg.GCInitialDelay, cfg.GCStorageInterval, storageMgr) + gc.Register("diskStorage", cfg.GCInitialDelay, cfg.GCInterval, storageMgr) return storageMgr, nil } -func (*diskBuilder) Name() string { - return name -} - type diskStorageMgr struct { - diskStore *storedriver.Store - diskStoreCleaner *storage.Cleaner - taskMgr mgr.SeedTaskMgr + cfg *storage.Config + diskDriver storedriver.Driver + cleaner *storage.Cleaner + taskMgr mgr.SeedTaskMgr } -func (s *diskStorageMgr) getDiskDefaultGcConfig() *storedriver.GcConfig { - totalSpace, err := s.diskStore.GetTotalSpace(context.TODO()) +func (s *diskStorageMgr) getDefaultGcConfig() *storage.GCConfig { + totalSpace, err := s.diskDriver.GetTotalSpace() if err != nil { logger.GcLogger.With("type", "disk").Errorf("get total space of disk: %v", err) } @@ -85,7 +82,7 @@ func (s *diskStorageMgr) getDiskDefaultGcConfig() *storedriver.GcConfig { if totalSpace > 0 && totalSpace/4 < yongGcThreshold { yongGcThreshold = totalSpace / 4 } - return &storedriver.GcConfig{ + return &storage.GCConfig{ YoungGCThreshold: 
yongGcThreshold, FullGCThreshold: 25 * unit.GB, IntervalThreshold: 2 * time.Hour, @@ -93,30 +90,26 @@ func (s *diskStorageMgr) getDiskDefaultGcConfig() *storedriver.GcConfig { } } -func (s *diskStorageMgr) InitializeCleaners() { - diskGcConfig := s.diskStore.GetGcConfig(context.TODO()) +func (s *diskStorageMgr) Initialize(taskMgr mgr.SeedTaskMgr) { + s.taskMgr = taskMgr + diskGcConfig := s.cfg.DriverConfigs[local.DiskDriverName].GCConfig if diskGcConfig == nil { - diskGcConfig = s.getDiskDefaultGcConfig() + diskGcConfig = s.getDefaultGcConfig() logger.GcLogger.With("type", "disk").Warnf("disk gc config is nil, use default gcConfig: %v", diskGcConfig) } - s.diskStoreCleaner = &storage.Cleaner{ - Cfg: diskGcConfig, - Store: s.diskStore, - StorageMgr: s, - TaskMgr: s.taskMgr, - } + s.cleaner, _ = storage.NewStorageCleaner(diskGcConfig, s.diskDriver, s, taskMgr) } -func (s *diskStorageMgr) AppendPieceMetaData(ctx context.Context, taskID string, pieceRecord *storage.PieceMetaRecord) error { - return s.diskStore.PutBytes(ctx, storage.GetAppendPieceMetaDataRaw(taskID), []byte(pieceRecord.String()+"\n")) +func (s *diskStorageMgr) AppendPieceMetaData(taskID string, pieceRecord *storage.PieceMetaRecord) error { + return s.diskDriver.PutBytes(storage.GetAppendPieceMetaDataRaw(taskID), []byte(pieceRecord.String()+"\n")) } -func (s *diskStorageMgr) ReadPieceMetaRecords(ctx context.Context, taskID string) ([]*storage.PieceMetaRecord, error) { - bytes, err := s.diskStore.GetBytes(ctx, storage.GetPieceMetaDataRaw(taskID)) +func (s *diskStorageMgr) ReadPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) { + readBytes, err := s.diskDriver.GetBytes(storage.GetPieceMetaDataRaw(taskID)) if err != nil { return nil, err } - pieceMetaRecords := strings.Split(strings.TrimSpace(string(bytes)), "\n") + pieceMetaRecords := strings.Split(strings.TrimSpace(string(readBytes)), "\n") var result = make([]*storage.PieceMetaRecord, 0) for _, pieceStr := range pieceMetaRecords { 
record, err := storage.ParsePieceMetaRecord(pieceStr) @@ -130,7 +123,7 @@ func (s *diskStorageMgr) ReadPieceMetaRecords(ctx context.Context, taskID string func (s *diskStorageMgr) GC(ctx context.Context) error { logger.GcLogger.With("type", "disk").Info("start the disk storage gc job") - gcTaskIDs, err := s.diskStoreCleaner.Gc(ctx, "disk", false) + gcTaskIDs, err := s.cleaner.GC("disk", false) if err != nil { logger.GcLogger.With("type", "disk").Error("failed to get gcTaskIDs") } @@ -138,7 +131,7 @@ func (s *diskStorageMgr) GC(ctx context.Context) error { for _, taskID := range gcTaskIDs { synclock.Lock(taskID, false) // try to ensure the taskID is not using again - if _, err := s.taskMgr.Get(ctx, taskID); err == nil || !cdnerrors.IsDataNotFound(err) { + if _, err := s.taskMgr.Get(taskID); err == nil || !cdnerrors.IsDataNotFound(err) { if err != nil { logger.GcLogger.With("type", "disk").Errorf("failed to get taskID(%s): %v", taskID, err) } @@ -146,7 +139,7 @@ func (s *diskStorageMgr) GC(ctx context.Context) error { continue } realGCCount++ - if err := s.DeleteTask(ctx, taskID); err != nil { + if err := s.DeleteTask(taskID); err != nil { logger.GcLogger.With("type", "disk").Errorf("failed to delete disk files with taskID(%s): %v", taskID, err) synclock.UnLock(taskID, false) continue @@ -157,19 +150,15 @@ func (s *diskStorageMgr) GC(ctx context.Context) error { return nil } -func (s *diskStorageMgr) SetTaskMgr(mgr mgr.SeedTaskMgr) { - s.taskMgr = mgr -} - -func (s *diskStorageMgr) WriteDownloadFile(ctx context.Context, taskID string, offset int64, len int64, buf *bytes.Buffer) error { +func (s *diskStorageMgr) WriteDownloadFile(taskID string, offset int64, len int64, buf *bytes.Buffer) error { raw := storage.GetDownloadRaw(taskID) raw.Offset = offset raw.Length = len - return s.diskStore.Put(ctx, raw, buf) + return s.diskDriver.Put(raw, buf) } -func (s *diskStorageMgr) ReadFileMetaData(ctx context.Context, taskID string) (*storage.FileMetaData, error) { - bytes, err 
:= s.diskStore.GetBytes(ctx, storage.GetTaskMetaDataRaw(taskID)) +func (s *diskStorageMgr) ReadFileMetaData(taskID string) (*storage.FileMetaData, error) { + bytes, err := s.diskDriver.GetBytes(storage.GetTaskMetaDataRaw(taskID)) if err != nil { return nil, errors.Wrapf(err, "failed to get metadata bytes") } @@ -181,15 +170,15 @@ func (s *diskStorageMgr) ReadFileMetaData(ctx context.Context, taskID string) (* return metaData, nil } -func (s *diskStorageMgr) WriteFileMetaData(ctx context.Context, taskID string, metaData *storage.FileMetaData) error { +func (s *diskStorageMgr) WriteFileMetaData(taskID string, metaData *storage.FileMetaData) error { data, err := json.Marshal(metaData) if err != nil { return errors.Wrapf(err, "failed to marshal metadata") } - return s.diskStore.PutBytes(ctx, storage.GetTaskMetaDataRaw(taskID), data) + return s.diskDriver.PutBytes(storage.GetTaskMetaDataRaw(taskID), data) } -func (s *diskStorageMgr) WritePieceMetaRecords(ctx context.Context, taskID string, records []*storage.PieceMetaRecord) error { +func (s *diskStorageMgr) WritePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error { recordStrs := make([]string, 0, len(records)) for i := range records { recordStrs = append(recordStrs, records[i].String()) @@ -197,55 +186,50 @@ func (s *diskStorageMgr) WritePieceMetaRecords(ctx context.Context, taskID strin pieceRaw := storage.GetPieceMetaDataRaw(taskID) pieceRaw.Trunc = true pieceRaw.TruncSize = 0 - return s.diskStore.PutBytes(ctx, pieceRaw, []byte(strings.Join(recordStrs, "\n"))) + return s.diskDriver.PutBytes(pieceRaw, []byte(strings.Join(recordStrs, "\n"))) } -func (s *diskStorageMgr) ReadPieceMetaBytes(ctx context.Context, taskID string) ([]byte, error) { - return s.diskStore.GetBytes(ctx, storage.GetPieceMetaDataRaw(taskID)) +func (s *diskStorageMgr) ReadPieceMetaBytes(taskID string) ([]byte, error) { + return s.diskDriver.GetBytes(storage.GetPieceMetaDataRaw(taskID)) } -func (s *diskStorageMgr) 
ReadDownloadFile(ctx context.Context, taskID string) (io.ReadCloser, error) { - return s.diskStore.Get(ctx, storage.GetDownloadRaw(taskID)) +func (s *diskStorageMgr) ReadDownloadFile(taskID string) (io.ReadCloser, error) { + return s.diskDriver.Get(storage.GetDownloadRaw(taskID)) } -func (s *diskStorageMgr) StatDownloadFile(ctx context.Context, taskID string) (*storedriver.StorageInfo, error) { - return s.diskStore.Stat(ctx, storage.GetDownloadRaw(taskID)) +func (s *diskStorageMgr) StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) { + return s.diskDriver.Stat(storage.GetDownloadRaw(taskID)) } -func (s *diskStorageMgr) CreateUploadLink(ctx context.Context, taskID string) error { +func (s *diskStorageMgr) CreateUploadLink(taskID string) error { // create a soft link from the upload file to the download file - if err := fileutils.SymbolicLink(s.diskStore.GetPath(storage.GetDownloadRaw(taskID)), - s.diskStore.GetPath(storage.GetUploadRaw(taskID))); err != nil { + if err := fileutils.SymbolicLink(s.diskDriver.GetPath(storage.GetDownloadRaw(taskID)), + s.diskDriver.GetPath(storage.GetUploadRaw(taskID))); err != nil { return err } return nil } -func (s *diskStorageMgr) DeleteTask(ctx context.Context, taskID string) error { - if err := s.diskStore.Remove(ctx, storage.GetTaskMetaDataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { - errors.Cause(err) +func (s *diskStorageMgr) DeleteTask(taskID string) error { + if err := s.diskDriver.Remove(storage.GetTaskMetaDataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { return err } - if err := s.diskStore.Remove(ctx, storage.GetPieceMetaDataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { + if err := s.diskDriver.Remove(storage.GetPieceMetaDataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { return err } - if err := s.diskStore.Remove(ctx, storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { + if err := 
s.diskDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { return err } - if err := s.diskStore.Remove(ctx, storage.GetUploadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { + if err := s.diskDriver.Remove(storage.GetUploadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { return err } // try to clean the parent bucket - if err := s.diskStore.Remove(ctx, storage.GetParentRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { + if err := s.diskDriver.Remove(storage.GetParentRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { logrus.Warnf("taskID:%s failed remove parent bucket:%v", taskID, err) } return nil } -func (s *diskStorageMgr) ResetRepo(ctx context.Context, task *types.SeedTask) error { - return s.DeleteTask(ctx, task.TaskID) -} - -func init() { - storage.Register(&diskBuilder{}) +func (s *diskStorageMgr) ResetRepo(task *types.SeedTask) error { + return s.DeleteTask(task.TaskID) } diff --git a/cdnsystem/daemon/mgr/cdn/storage/hybrid/hybrid.go b/cdnsystem/daemon/mgr/cdn/storage/hybrid/hybrid.go index cb39c9bb3..f82ed7b30 100644 --- a/cdnsystem/daemon/mgr/cdn/storage/hybrid/hybrid.go +++ b/cdnsystem/daemon/mgr/cdn/storage/hybrid/hybrid.go @@ -29,7 +29,6 @@ import ( "time" "d7y.io/dragonfly/v2/cdnsystem/cdnerrors" - "d7y.io/dragonfly/v2/cdnsystem/config" "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr" "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr/cdn/storage" "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr/gc" @@ -44,52 +43,108 @@ import ( "go.uber.org/atomic" ) -const name = "hybrid" +const StorageMode = "hybrid" const secureLevel = 500 * unit.MB func init() { - var builder *hybridBuilder = nil - var _ storage.Builder = builder +} + +func init() { var hybrid *hybridStorageMgr = nil var _ storage.Manager = hybrid var _ gc.Executor = hybrid + storage.Register(StorageMode, NewStorageManager) } -type hybridBuilder struct { -} - -func (*hybridBuilder) Build(cfg *config.Config) (storage.Manager, error) { 
- diskStore, err := storedriver.Get(local.StorageDriver) +// NewStorageManager performs initialization for storage manager and return a storage Manager. +func NewStorageManager(cfg *storage.Config) (storage.Manager, error) { + if len(cfg.DriverConfigs) != 2 { + return nil, fmt.Errorf("disk storage manager should have two driver, cfg's driver number is wrong : %v", cfg) + } + diskDriver, err := storedriver.Get(local.DiskDriverName) if err != nil { - return nil, err + return nil, fmt.Errorf("find disk driver for hybrid storage manager failed, config %v: %v", cfg, err) } - memoryStore, err := storedriver.Get(local.MemoryStorageDriver) + memoryDriver, err := storedriver.Get(local.MemoryDriverName) if err != nil { - return nil, err + return nil, fmt.Errorf("find memory driver for hybrid storage manager failed, config %v: %v", cfg, err) } storageMgr := &hybridStorageMgr{ - memoryStore: memoryStore, - diskStore: diskStore, - hasShm: true, - shmSwitch: newShmSwitch(), + cfg: cfg, + memoryDriver: memoryDriver, + diskDriver: diskDriver, + hasShm: true, + shmSwitch: newShmSwitch(), } - gc.Register("hybridStorage", cfg.GCInitialDelay, cfg.GCStorageInterval, storageMgr) + gc.Register("hybridStorage", cfg.GCInitialDelay, cfg.GCInterval, storageMgr) return storageMgr, nil } -func (*hybridBuilder) Name() string { - return name +func (h *hybridStorageMgr) Initialize(taskMgr mgr.SeedTaskMgr) { + h.taskMgr = taskMgr + diskGcConfig := h.cfg.DriverConfigs[local.DiskDriverName].GCConfig + + if diskGcConfig == nil { + diskGcConfig = h.getDiskDefaultGcConfig() + logger.GcLogger.With("type", "hybrid").Warnf("disk gc config is nil, use default gcConfig: %v", diskGcConfig) + } + h.diskStoreCleaner, _ = storage.NewStorageCleaner(diskGcConfig, h.diskDriver, h, taskMgr) + memoryGcConfig := h.cfg.DriverConfigs[local.MemoryDriverName].GCConfig + if memoryGcConfig == nil { + memoryGcConfig = h.getMemoryDefaultGcConfig() + logger.GcLogger.With("type", "hybrid").Warnf("memory gc config is nil, use 
default gcConfig: %v", diskGcConfig) + } + h.memoryStoreCleaner, _ = storage.NewStorageCleaner(memoryGcConfig, h.memoryDriver, h, taskMgr) + logger.GcLogger.With("type", "hybrid").Info("success initialize hybrid cleaners") +} + +func (h *hybridStorageMgr) getDiskDefaultGcConfig() *storage.GCConfig { + totalSpace, err := h.diskDriver.GetTotalSpace() + if err != nil { + logger.GcLogger.With("type", "hybrid").Errorf("failed to get total space of disk: %v", err) + } + yongGcThreshold := 200 * unit.GB + if totalSpace > 0 && totalSpace/4 < yongGcThreshold { + yongGcThreshold = totalSpace / 4 + } + return &storage.GCConfig{ + YoungGCThreshold: yongGcThreshold, + FullGCThreshold: 25 * unit.GB, + IntervalThreshold: 2 * time.Hour, + CleanRatio: 1, + } +} + +func (h *hybridStorageMgr) getMemoryDefaultGcConfig() *storage.GCConfig { + // determine whether the shared cache can be used + diff := unit.Bytes(0) + totalSpace, err := h.memoryDriver.GetTotalSpace() + if err != nil { + logger.GcLogger.With("type", "hybrid").Errorf("failed to get total space of memory: %v", err) + } + if totalSpace < 72*unit.GB { + diff = 72*unit.GB - totalSpace + } + if diff >= totalSpace { + h.hasShm = false + } + return &storage.GCConfig{ + YoungGCThreshold: 10*unit.GB + diff, + FullGCThreshold: 2*unit.GB + diff, + CleanRatio: 3, + IntervalThreshold: 2 * time.Hour, + } } type hybridStorageMgr struct { - cfg config.Config - memoryStore storedriver.Driver - diskStore storedriver.Driver - taskMgr mgr.SeedTaskMgr + cfg *storage.Config + memoryDriver storedriver.Driver + diskDriver storedriver.Driver diskStoreCleaner *storage.Cleaner memoryStoreCleaner *storage.Cleaner + taskMgr mgr.SeedTaskMgr shmSwitch *shmSwitch hasShm bool } @@ -100,7 +155,7 @@ func (h *hybridStorageMgr) GC(ctx context.Context) error { wg.Add(1) go func() { defer wg.Done() - gcTaskIDs, err := h.diskStoreCleaner.Gc(ctx, "hybrid", false) + gcTaskIDs, err := h.diskStoreCleaner.GC("hybrid", false) if err != nil { 
logger.GcLogger.With("type", "hybrid").Error("gc disk: failed to get gcTaskIds") } @@ -111,7 +166,7 @@ func (h *hybridStorageMgr) GC(ctx context.Context) error { wg.Add(1) go func() { defer wg.Done() - gcTaskIDs, err := h.memoryStoreCleaner.Gc(ctx, "hybrid", false) + gcTaskIDs, err := h.memoryStoreCleaner.GC("hybrid", false) logger.GcLogger.With("type", "hybrid").Infof("at most %d tasks can be cleaned up from memory", len(gcTaskIDs)) if err != nil { logger.GcLogger.With("type", "hybrid").Error("gc memory: failed to get gcTaskIds") @@ -128,7 +183,7 @@ func (h *hybridStorageMgr) gcTasks(ctx context.Context, gcTaskIDs []string, isDi for _, taskID := range gcTaskIDs { synclock.Lock(taskID, false) // try to ensure the taskID is not using again - if _, err := h.taskMgr.Get(ctx, taskID); err == nil || !cdnerrors.IsDataNotFound(err) { + if _, err := h.taskMgr.Get(taskID); err == nil || !cdnerrors.IsDataNotFound(err) { if err != nil { logger.GcLogger.With("type", "hybrid").Errorf("gc disk: failed to get taskID(%s): %v", taskID, err) } @@ -137,13 +192,13 @@ func (h *hybridStorageMgr) gcTasks(ctx context.Context, gcTaskIDs []string, isDi } realGCCount++ if isDisk { - if err := h.deleteDiskFiles(ctx, taskID); err != nil { + if err := h.deleteDiskFiles(taskID); err != nil { logger.GcLogger.With("type", "hybrid").Errorf("gc disk: failed to delete disk files with taskID(%s): %v", taskID, err) synclock.UnLock(taskID, false) continue } } else { - if err := h.deleteMemoryFiles(ctx, taskID); err != nil { + if err := h.deleteMemoryFiles(taskID); err != nil { logger.GcLogger.With("type", "hybrid").Errorf("gc memory: failed to delete memory files with taskID(%s): %v", taskID, err) synclock.UnLock(taskID, false) continue @@ -154,49 +209,27 @@ func (h *hybridStorageMgr) gcTasks(ctx context.Context, gcTaskIDs []string, isDi return realGCCount } -func (h *hybridStorageMgr) SetTaskMgr(taskMgr mgr.SeedTaskMgr) { - h.taskMgr = taskMgr -} - -func (h *hybridStorageMgr) InitializeCleaners() { - 
diskGcConfig := h.diskStore.GetGcConfig(context.TODO()) - if diskGcConfig == nil { - diskGcConfig = h.getDiskDefaultGcConfig() - logger.GcLogger.With("type", "hybrid").Warnf("disk gc config is nil, use default gcConfig: %v", diskGcConfig) - } - - h.diskStoreCleaner = storage.NewStorageCleaner(diskGcConfig, h.diskStore, h, h.taskMgr) - memoryGcConfig := h.memoryStore.GetGcConfig(context.TODO()) - if memoryGcConfig == nil { - memoryGcConfig = h.getMemoryDefaultGcConfig() - logger.GcLogger.With("type", "hybrid").Warnf("memory gc config is nil, use default gcConfig: %v", diskGcConfig) - } - h.memoryStoreCleaner = storage.NewStorageCleaner(memoryGcConfig, h.memoryStore, h, h.taskMgr) - logger.GcLogger.With("type", "hybrid").Info("success initialize hybrid cleaners") -} - -func (h *hybridStorageMgr) WriteDownloadFile(ctx context.Context, taskID string, offset int64, len int64, - buf *bytes.Buffer) error { +func (h *hybridStorageMgr) WriteDownloadFile(taskID string, offset int64, len int64, buf *bytes.Buffer) error { raw := storage.GetDownloadRaw(taskID) raw.Offset = offset raw.Length = len - return h.diskStore.Put(ctx, raw, buf) + return h.diskDriver.Put(raw, buf) } -func (h *hybridStorageMgr) DeleteTask(ctx context.Context, taskID string) error { - return h.deleteTaskFiles(ctx, taskID, true, true) +func (h *hybridStorageMgr) DeleteTask(taskID string) error { + return h.deleteTaskFiles(taskID, true, true) } -func (h *hybridStorageMgr) ReadDownloadFile(ctx context.Context, taskID string) (io.ReadCloser, error) { - return h.diskStore.Get(ctx, storage.GetDownloadRaw(taskID)) +func (h *hybridStorageMgr) ReadDownloadFile(taskID string) (io.ReadCloser, error) { + return h.diskDriver.Get(storage.GetDownloadRaw(taskID)) } -func (h *hybridStorageMgr) ReadPieceMetaRecords(ctx context.Context, taskID string) ([]*storage.PieceMetaRecord, error) { - bytes, err := h.diskStore.GetBytes(ctx, storage.GetPieceMetaDataRaw(taskID)) +func (h *hybridStorageMgr) ReadPieceMetaRecords(taskID 
string) ([]*storage.PieceMetaRecord, error) { + readBytes, err := h.diskDriver.GetBytes(storage.GetPieceMetaDataRaw(taskID)) if err != nil { return nil, err } - pieceMetaRecords := strings.Split(strings.TrimSpace(string(bytes)), "\n") + pieceMetaRecords := strings.Split(strings.TrimSpace(string(readBytes)), "\n") var result = make([]*storage.PieceMetaRecord, 0) for _, pieceStr := range pieceMetaRecords { record, err := storage.ParsePieceMetaRecord(pieceStr) @@ -208,124 +241,125 @@ func (h *hybridStorageMgr) ReadPieceMetaRecords(ctx context.Context, taskID stri return result, nil } -func (h *hybridStorageMgr) ReadFileMetaData(ctx context.Context, taskID string) (*storage.FileMetaData, error) { - bytes, err := h.diskStore.GetBytes(ctx, storage.GetTaskMetaDataRaw(taskID)) +func (h *hybridStorageMgr) ReadFileMetaData(taskID string) (*storage.FileMetaData, error) { + readBytes, err := h.diskDriver.GetBytes(storage.GetTaskMetaDataRaw(taskID)) if err != nil { return nil, errors.Wrapf(err, "failed to get metadata bytes") } metaData := &storage.FileMetaData{} - if err := json.Unmarshal(bytes, metaData); err != nil { + if err := json.Unmarshal(readBytes, metaData); err != nil { return nil, errors.Wrapf(err, "failed to unmarshal metadata bytes") } return metaData, nil } -func (h *hybridStorageMgr) AppendPieceMetaData(ctx context.Context, taskID string, record *storage.PieceMetaRecord) error { - return h.diskStore.PutBytes(ctx, storage.GetAppendPieceMetaDataRaw(taskID), []byte(record.String()+"\n")) +func (h *hybridStorageMgr) AppendPieceMetaData(taskID string, record *storage.PieceMetaRecord) error { + return h.diskDriver.PutBytes(storage.GetAppendPieceMetaDataRaw(taskID), []byte(record.String()+"\n")) } -func (h *hybridStorageMgr) WriteFileMetaData(ctx context.Context, taskID string, metaData *storage.FileMetaData) error { +func (h *hybridStorageMgr) WriteFileMetaData(taskID string, metaData *storage.FileMetaData) error { data, err := json.Marshal(metaData) if err != nil { 
return errors.Wrapf(err, "failed to marshal metadata") } - return h.diskStore.PutBytes(ctx, storage.GetTaskMetaDataRaw(taskID), data) + return h.diskDriver.PutBytes(storage.GetTaskMetaDataRaw(taskID), data) } -func (h *hybridStorageMgr) WritePieceMetaRecords(ctx context.Context, taskID string, records []*storage.PieceMetaRecord) error { - recordStrs := make([]string, 0, len(records)) +func (h *hybridStorageMgr) WritePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error { + recordStrings := make([]string, 0, len(records)) for i := range records { - recordStrs = append(recordStrs, records[i].String()) + recordStrings = append(recordStrings, records[i].String()) } - return h.diskStore.PutBytes(ctx, storage.GetPieceMetaDataRaw(taskID), []byte(strings.Join(recordStrs, "\n"))) + return h.diskDriver.PutBytes(storage.GetPieceMetaDataRaw(taskID), []byte(strings.Join(recordStrings, "\n"))) } -func (h *hybridStorageMgr) CreateUploadLink(ctx context.Context, taskID string) error { +func (h *hybridStorageMgr) CreateUploadLink(taskID string) error { // create a soft link from the upload file to the download file - if err := fileutils.SymbolicLink(h.diskStore.GetPath(storage.GetDownloadRaw(taskID)), - h.diskStore.GetPath(storage.GetUploadRaw(taskID))); err != nil { + if err := fileutils.SymbolicLink(h.diskDriver.GetPath(storage.GetDownloadRaw(taskID)), + h.diskDriver.GetPath(storage.GetUploadRaw(taskID))); err != nil { return err } return nil } -func (h *hybridStorageMgr) ResetRepo(ctx context.Context, task *types.SeedTask) error { - if err := h.deleteTaskFiles(ctx, task.TaskID, false, true); err != nil { +func (h *hybridStorageMgr) ResetRepo(task *types.SeedTask) error { + if err := h.deleteTaskFiles(task.TaskID, false, true); err != nil { logger.WithTaskID(task.TaskID).Errorf("reset repo: failed to delete task files: %v", err) } // 判断是否有足够空间存放 - shmPath, err := h.tryShmSpace(ctx, task.URL, task.TaskID, task.SourceFileLength) + shmPath, err := 
h.tryShmSpace(task.URL, task.TaskID, task.SourceFileLength) if err == nil { - return fileutils.SymbolicLink(shmPath, h.diskStore.GetPath(storage.GetDownloadRaw(task.TaskID))) + return fileutils.SymbolicLink(shmPath, h.diskDriver.GetPath(storage.GetDownloadRaw(task.TaskID))) } return nil } func (h *hybridStorageMgr) GetDownloadPath(rawFunc *storedriver.Raw) string { - return h.diskStore.GetPath(rawFunc) + return h.diskDriver.GetPath(rawFunc) } -func (h *hybridStorageMgr) StatDownloadFile(ctx context.Context, taskID string) (*storedriver.StorageInfo, error) { - return h.diskStore.Stat(ctx, storage.GetDownloadRaw(taskID)) +func (h *hybridStorageMgr) StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) { + return h.diskDriver.Stat(storage.GetDownloadRaw(taskID)) } -func (h *hybridStorageMgr) deleteDiskFiles(ctx context.Context, taskID string) error { - return h.deleteTaskFiles(ctx, taskID, true, true) +func (h *hybridStorageMgr) deleteDiskFiles(taskID string) error { + return h.deleteTaskFiles(taskID, true, true) } -func (h *hybridStorageMgr) deleteMemoryFiles(ctx context.Context, taskID string) error { - return h.deleteTaskFiles(ctx, taskID, true, false) +func (h *hybridStorageMgr) deleteMemoryFiles(taskID string) error { + return h.deleteTaskFiles(taskID, true, false) } -func (h *hybridStorageMgr) deleteTaskFiles(ctx context.Context, taskID string, deleteUploadPath bool, deleteHardLink bool) error { +func (h *hybridStorageMgr) deleteTaskFiles(taskID string, deleteUploadPath bool, deleteHardLink bool) error { // delete task file data - if err := h.diskStore.Remove(ctx, storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { + if err := h.diskDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { return err } // delete memory file - if err := h.memoryStore.Remove(ctx, storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { + if err := 
h.memoryDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { return err } if deleteUploadPath { - if err := h.diskStore.Remove(ctx, storage.GetUploadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { + if err := h.diskDriver.Remove(storage.GetUploadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { return err } } - exists := h.diskStore.Exits(ctx, getHardLinkRaw(taskID)) + exists := h.diskDriver.Exits(getHardLinkRaw(taskID)) if !deleteHardLink && exists { - h.diskStore.MoveFile(h.diskStore.GetPath(getHardLinkRaw(taskID)), h.diskStore.GetPath(storage.GetDownloadRaw( - taskID))) + if err := h.diskDriver.MoveFile(h.diskDriver.GetPath(getHardLinkRaw(taskID)), h.diskDriver.GetPath(storage.GetDownloadRaw(taskID))); err != nil { + return err + } } else { - if err := h.diskStore.Remove(ctx, getHardLinkRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { + if err := h.diskDriver.Remove(getHardLinkRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { return err } // deleteTaskFiles delete files associated with taskID - if err := h.diskStore.Remove(ctx, storage.GetTaskMetaDataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { + if err := h.diskDriver.Remove(storage.GetTaskMetaDataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { return err } // delete piece meta data - if err := h.diskStore.Remove(ctx, storage.GetPieceMetaDataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { + if err := h.diskDriver.Remove(storage.GetPieceMetaDataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { return err } } // try to clean the parent bucket - if err := h.diskStore.Remove(ctx, storage.GetParentRaw(taskID)); err != nil && + if err := h.diskDriver.Remove(storage.GetParentRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { logger.WithTaskID(taskID).Warnf("failed to remove parent bucket:%v", err) } return nil } -func (h *hybridStorageMgr) tryShmSpace(ctx 
context.Context, url, taskID string, fileLength int64) (string, error) { +func (h *hybridStorageMgr) tryShmSpace(url, taskID string, fileLength int64) (string, error) { if h.shmSwitch.check(url, fileLength) && h.hasShm { remainder := atomic.NewInt64(0) - h.memoryStore.Walk(ctx, &storedriver.Raw{ + h.memoryDriver.Walk(&storedriver.Raw{ WalkFn: func(filePath string, info os.FileInfo, err error) error { if fileutils.IsRegular(filePath) { taskID := path.Base(filePath) - task, err := h.taskMgr.Get(ctx, taskID) + task, err := h.taskMgr.Get(taskID) if err == nil { var totalLen int64 = 0 if task.CdnFileLength > 0 { @@ -343,65 +377,27 @@ func (h *hybridStorageMgr) tryShmSpace(ctx context.Context, url, taskID string, return nil }, }) - canUseShm := h.getMemoryUsableSpace(ctx)-unit.Bytes(remainder.Load())-secureLevel >= unit.Bytes( + canUseShm := h.getMemoryUsableSpace()-unit.Bytes(remainder.Load())-secureLevel >= unit.Bytes( fileLength) if !canUseShm { // 如果剩余空间过小,则强制执行一次fullgc后在检查是否满足 - h.memoryStoreCleaner.Gc(ctx, "hybrid", true) - canUseShm = h.getMemoryUsableSpace(ctx)-unit.Bytes(remainder.Load())-secureLevel >= unit.Bytes( + h.memoryStoreCleaner.GC("hybrid", true) + canUseShm = h.getMemoryUsableSpace()-unit.Bytes(remainder.Load())-secureLevel >= unit.Bytes( fileLength) } if canUseShm { // 创建shm raw := &storedriver.Raw{ Key: taskID, } - return h.memoryStore.GetPath(raw), nil + return h.memoryDriver.GetPath(raw), nil } return "", fmt.Errorf("not enough free space left") } return "", fmt.Errorf("shared memory is not allowed") } -func (h *hybridStorageMgr) getDiskDefaultGcConfig() *storedriver.GcConfig { - totalSpace, err := h.diskStore.GetTotalSpace(context.TODO()) - if err != nil { - logger.GcLogger.With("type", "hybrid").Errorf("failed to get total space of disk: %v", err) - } - yongGcThreshold := 200 * unit.GB - if totalSpace > 0 && totalSpace/4 < yongGcThreshold { - yongGcThreshold = totalSpace / 4 - } - return &storedriver.GcConfig{ - YoungGCThreshold: 
yongGcThreshold, - FullGCThreshold: 25 * unit.GB, - IntervalThreshold: 2 * time.Hour, - CleanRatio: 1, - } -} - -func (h *hybridStorageMgr) getMemoryDefaultGcConfig() *storedriver.GcConfig { - // determine whether the shared cache can be used - diff := unit.Bytes(0) - totalSpace, err := h.memoryStore.GetTotalSpace(context.TODO()) - if err != nil { - logger.GcLogger.With("type", "hybrid").Errorf("failed to get total space of memory: %v", err) - } - if totalSpace < 72*unit.GB { - diff = 72*unit.GB - totalSpace - } - if diff >= totalSpace { - h.hasShm = false - } - return &storedriver.GcConfig{ - YoungGCThreshold: 10*unit.GB + diff, - FullGCThreshold: 2*unit.GB + diff, - CleanRatio: 3, - IntervalThreshold: 2 * time.Hour, - } -} - -func (h *hybridStorageMgr) getMemoryUsableSpace(ctx context.Context) unit.Bytes { - totalSize, freeSize, err := h.memoryStore.GetTotalAndFreeSpace(ctx) +func (h *hybridStorageMgr) getMemoryUsableSpace() unit.Bytes { + totalSize, freeSize, err := h.memoryDriver.GetTotalAndFreeSpace() if err != nil { logger.GcLogger.With("type", "hybrid").Errorf("failed to get total and free space of memory: %v", err) return 0 @@ -425,7 +421,3 @@ func getHardLinkRaw(taskID string) *storedriver.Raw { raw.Key = raw.Key + ".hard" return raw } - -func init() { - storage.Register(&hybridBuilder{}) -} diff --git a/cdnsystem/daemon/mgr/cdn/storage/storage_gc.go b/cdnsystem/daemon/mgr/cdn/storage/storage_gc.go index 92cdbb79c..f363a4f73 100644 --- a/cdnsystem/daemon/mgr/cdn/storage/storage_gc.go +++ b/cdnsystem/daemon/mgr/cdn/storage/storage_gc.go @@ -17,7 +17,6 @@ package storage import ( - "context" "os" "strings" "time" @@ -33,40 +32,40 @@ import ( ) type Cleaner struct { - Cfg *storedriver.GcConfig - Store storedriver.Driver - StorageMgr Manager - TaskMgr mgr.SeedTaskMgr + cfg *GCConfig + driver storedriver.Driver + taskMgr mgr.SeedTaskMgr + storageMgr Manager } -func NewStorageCleaner(gcConfig *storedriver.GcConfig, store storedriver.Driver, storageMgr Manager, 
taskMgr mgr.SeedTaskMgr) *Cleaner { +func NewStorageCleaner(cfg *GCConfig, driver storedriver.Driver, storageMgr Manager, taskMgr mgr.SeedTaskMgr) (*Cleaner, error) { return &Cleaner{ - Cfg: gcConfig, - Store: store, - StorageMgr: storageMgr, - TaskMgr: taskMgr, - } + cfg: cfg, + driver: driver, + taskMgr: taskMgr, + storageMgr: storageMgr, + }, nil } -func (cleaner *Cleaner) Gc(ctx context.Context, storagePattern string, force bool) ([]string, error) { - freeSpace, err := cleaner.Store.GetAvailSpace(ctx) +func (cleaner *Cleaner) GC(storagePattern string, force bool) ([]string, error) { + freeSpace, err := cleaner.driver.GetAvailSpace() if err != nil { if cdnerrors.IsFileNotExist(err) { - err = cleaner.Store.CreateBaseDir(ctx) + err = cleaner.driver.CreateBaseDir() if err != nil { return nil, err } - freeSpace, _ = cleaner.Store.GetAvailSpace(ctx) + freeSpace, _ = cleaner.driver.GetAvailSpace() } else { return nil, errors.Wrapf(err, "failed to get avail space") } } fullGC := force if !fullGC { - if freeSpace > cleaner.Cfg.YoungGCThreshold { + if freeSpace > cleaner.cfg.YoungGCThreshold { return nil, nil } - if freeSpace <= cleaner.Cfg.FullGCThreshold { + if freeSpace <= cleaner.cfg.FullGCThreshold { fullGC = true } } @@ -98,7 +97,7 @@ func (cleaner *Cleaner) Gc(ctx context.Context, storagePattern string, force boo walkTaskIds[taskID] = true // we should return directly when we success to get info which means it is being used - if _, err := cleaner.TaskMgr.Get(ctx, taskID); err == nil || !cdnerrors.IsDataNotFound(err) { + if _, err := cleaner.taskMgr.Get(taskID); err == nil || !cdnerrors.IsDataNotFound(err) { if err != nil { logger.GcLogger.With("type", storagePattern).Errorf("failed to get taskID(%s): %v", taskID, err) } @@ -111,21 +110,21 @@ func (cleaner *Cleaner) Gc(ctx context.Context, storagePattern string, force boo return nil } - metaData, err := cleaner.StorageMgr.ReadFileMetaData(ctx, taskID) + metaData, err := cleaner.storageMgr.ReadFileMetaData(taskID) 
if err != nil || metaData == nil { logger.GcLogger.With("type", storagePattern).Debugf("taskID: %s, failed to get metadata: %v", taskID, err) gcTaskIDs = append(gcTaskIDs, taskID) return nil } // put taskId into gapTasks or intervalTasks which will sort by some rules - if err := cleaner.sortInert(ctx, gapTasks, intervalTasks, metaData); err != nil { + if err := cleaner.sortInert(gapTasks, intervalTasks, metaData); err != nil { logger.GcLogger.With("type", storagePattern).Errorf("failed to parse inert metaData(%+v): %v", metaData, err) } return nil } - if err := cleaner.Store.Walk(ctx, &storedriver.Raw{ + if err := cleaner.driver.Walk(&storedriver.Raw{ WalkFn: walkFn, }); err != nil { return nil, err @@ -138,13 +137,12 @@ func (cleaner *Cleaner) Gc(ctx context.Context, storagePattern string, force boo return gcTaskIDs, nil } -func (cleaner *Cleaner) sortInert(ctx context.Context, gapTasks, intervalTasks *treemap.Map, - metaData *FileMetaData) error { +func (cleaner *Cleaner) sortInert(gapTasks, intervalTasks *treemap.Map, metaData *FileMetaData) error { gap := timeutils.CurrentTimeMillis() - metaData.AccessTime if metaData.Interval > 0 && - gap <= metaData.Interval+(int64(cleaner.Cfg.IntervalThreshold.Seconds())*int64(time.Millisecond)) { - info, err := cleaner.StorageMgr.StatDownloadFile(ctx, metaData.TaskID) + gap <= metaData.Interval+(int64(cleaner.cfg.IntervalThreshold.Seconds())*int64(time.Millisecond)) { + info, err := cleaner.storageMgr.StatDownloadFile(metaData.TaskID) if err != nil { return err } @@ -184,6 +182,6 @@ func (cleaner *Cleaner) getGCTasks(gapTasks, intervalTasks *treemap.Map) []strin } } - gcLen := (len(gcTasks)*cleaner.Cfg.CleanRatio + 9) / 10 + gcLen := (len(gcTasks)*cleaner.cfg.CleanRatio + 9) / 10 return gcTasks[0:gcLen] } diff --git a/cdnsystem/daemon/mgr/cdn/storage/storage_mgr.go b/cdnsystem/daemon/mgr/cdn/storage/storage_mgr.go index 61e9d8439..07facf934 100644 --- a/cdnsystem/daemon/mgr/cdn/storage/storage_mgr.go +++ 
b/cdnsystem/daemon/mgr/cdn/storage/storage_mgr.go @@ -18,52 +18,62 @@ package storage import ( "bytes" - "context" "fmt" "io" + "reflect" "strconv" "strings" + "time" - "d7y.io/dragonfly/v2/cdnsystem/config" "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr" + "d7y.io/dragonfly/v2/cdnsystem/plugins" "d7y.io/dragonfly/v2/cdnsystem/storedriver" "d7y.io/dragonfly/v2/cdnsystem/types" - logger "d7y.io/dragonfly/v2/pkg/dflog" + "d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/util/rangeutils" - "d7y.io/dragonfly/v2/pkg/util/stringutils" + "github.com/mitchellh/mapstructure" "github.com/pkg/errors" + "gopkg.in/yaml.v3" ) -var ( - builderMap = make(map[string]Builder) - defaultStorage = "disk" -) +type Manager interface { + Initialize(taskMgr mgr.SeedTaskMgr) -func Register(b Builder) { - builderMap[strings.ToLower(b.Name())] = b -} + // ResetRepo reset the storage of task + ResetRepo(*types.SeedTask) error -func getBuilder(name string, defaultIfAbsent bool) Builder { - if b, ok := builderMap[strings.ToLower(name)]; ok { - return b - } - if stringutils.IsBlank(name) && defaultIfAbsent { - return builderMap[defaultStorage] - } - return nil -} + // StatDownloadFile + StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) -// Builder creates a storage -type Builder interface { - Build(cfg *config.Config) (Manager, error) + // WriteDownloadFile + WriteDownloadFile(taskID string, offset int64, len int64, buf *bytes.Buffer) error - Name() string -} + // ReadDownloadFile + ReadDownloadFile(taskID string) (io.ReadCloser, error) + + // CreateUploadLink + CreateUploadLink(taskID string) error -type BuildOptions interface { + // ReadFileMetaData + ReadFileMetaData(taskID string) (*FileMetaData, error) + + // WriteFileMetaData + WriteFileMetaData(taskID string, meta *FileMetaData) error + + // WritePieceMetaRecords + WritePieceMetaRecords(taskID string, metaRecords []*PieceMetaRecord) error + + // AppendPieceMetaData + AppendPieceMetaData(taskID string, metaRecord 
*PieceMetaRecord) error + + // ReadPieceMetaRecords + ReadPieceMetaRecords(taskID string) ([]*PieceMetaRecord, error) + + // DeleteTask + DeleteTask(taskID string) error } -// fileMetaData +// FileMetaData type FileMetaData struct { TaskID string `json:"taskId"` TaskURL string `json:"taskUrl"` @@ -81,8 +91,6 @@ type FileMetaData struct { //PieceMetaDataSign string `json:"pieceMetaDataSign"` } -const fieldSeparator = ":" - // pieceMetaRecord type PieceMetaRecord struct { PieceNum int32 `json:"pieceNum"` // piece Num start from 0 @@ -93,9 +101,11 @@ type PieceMetaRecord struct { PieceStyle types.PieceFormat `json:"pieceStyle"` // 1: PlainUnspecified } +const fieldSeparator = ":" + func (record PieceMetaRecord) String() string { - return fmt.Sprintf("%d%s%d%s%s%s%s%s%s%s%d", record.PieceNum, fieldSeparator, record.PieceLen, fieldSeparator, record.Md5, fieldSeparator, record.Range, - fieldSeparator, record.OriginRange, fieldSeparator, record.PieceStyle) + return fmt.Sprint(record.PieceNum, fieldSeparator, record.PieceLen, fieldSeparator, record.Md5, fieldSeparator, record.Range, fieldSeparator, + record.OriginRange, fieldSeparator, record.PieceStyle) } func ParsePieceMetaRecord(value string) (record *PieceMetaRecord, err error) { @@ -122,9 +132,6 @@ func ParsePieceMetaRecord(value string) (record *PieceMetaRecord, err error) { if err != nil { return nil, errors.Wrapf(err, "invalid origin range:%s", fields[4]) } - if err != nil { - return nil, errors.Wrapf(err, "invalid offset:%s", fields[4]) - } pieceStyle, err := strconv.ParseInt(fields[5], 10, 8) if err != nil { return nil, errors.Wrapf(err, "invalid pieceStyle:%s", fields[5]) @@ -139,39 +146,146 @@ func ParsePieceMetaRecord(value string) (record *PieceMetaRecord, err error) { }, nil } -func NewManager(cfg *config.Config) (Manager, error) { - sb := getBuilder(cfg.StoragePattern, true) - if sb == nil { - return nil, fmt.Errorf("could not get storage for pattern: %s", cfg.StoragePattern) - } - logger.Debugf("storage 
pattern is %s", sb.Name()) - return sb.Build(cfg) +type managerPlugin struct { + // name is a unique identifier, you can also name it ID. + name string + // instance holds a manager instance which implements the interface of Manager. + instance Manager } -type Manager interface { - ResetRepo(context.Context, *types.SeedTask) error +func (m *managerPlugin) Type() plugins.PluginType { + return plugins.StorageManagerPlugin +} - StatDownloadFile(context.Context, string) (*storedriver.StorageInfo, error) +func (m *managerPlugin) Name() string { + return m.name +} - WriteDownloadFile(context.Context, string, int64, int64, *bytes.Buffer) error +func (m *managerPlugin) ResetRepo(task *types.SeedTask) error { + return m.instance.ResetRepo(task) +} - ReadDownloadFile(context.Context, string) (io.ReadCloser, error) +func (m *managerPlugin) StatDownloadFile(path string) (*storedriver.StorageInfo, error) { + return m.instance.StatDownloadFile(path) +} - CreateUploadLink(context.Context, string) error +func (m *managerPlugin) WriteDownloadFile(s string, i int64, i2 int64, buffer *bytes.Buffer) error { + return m.instance.WriteDownloadFile(s, i, i2, buffer) +} - ReadFileMetaData(context.Context, string) (*FileMetaData, error) +func (m *managerPlugin) ReadDownloadFile(s string) (io.ReadCloser, error) { + return m.instance.ReadDownloadFile(s) +} - WriteFileMetaData(context.Context, string, *FileMetaData) error +func (m *managerPlugin) CreateUploadLink(s string) error { + return m.instance.CreateUploadLink(s) +} - WritePieceMetaRecords(context.Context, string, []*PieceMetaRecord) error +func (m *managerPlugin) ReadFileMetaData(s string) (*FileMetaData, error) { + return m.instance.ReadFileMetaData(s) +} - AppendPieceMetaData(context.Context, string, *PieceMetaRecord) error +func (m *managerPlugin) WriteFileMetaData(s string, data *FileMetaData) error { + return m.instance.WriteFileMetaData(s, data) +} + +func (m *managerPlugin) WritePieceMetaRecords(s string, records
[]*PieceMetaRecord) error { + return m.instance.WritePieceMetaRecords(s, records) +} + +func (m *managerPlugin) AppendPieceMetaData(s string, record *PieceMetaRecord) error { + return m.instance.AppendPieceMetaData(s, record) +} + +func (m *managerPlugin) ReadPieceMetaRecords(s string) ([]*PieceMetaRecord, error) { + return m.instance.ReadPieceMetaRecords(s) +} + +func (m *managerPlugin) DeleteTask(s string) error { + return m.instance.DeleteTask(s) +} + +// ManagerBuilder is a function that creates a new storage manager plugin instance with the given conf. +type ManagerBuilder func(cfg *Config) (Manager, error) + +// Register defines an interface to register a storage manager with specified name. +// All storage managers should call this function to register themselves with the storage manager factory. +func Register(name string, builder ManagerBuilder) { + name = strings.ToLower(name) + // plugin builder + var f = func(conf interface{}) (plugins.Plugin, error) { + cfg := &Config{} + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc(func(from, to reflect.Type, v interface{}) (interface{}, error) { + switch to { + case reflect.TypeOf(unit.B), + reflect.TypeOf(time.Second): + b, _ := yaml.Marshal(v) + p := reflect.New(to) + if err := yaml.Unmarshal(b, p.Interface()); err != nil { + return nil, err + } + return p.Interface(), nil + default: + return v, nil + } + }), + Result: cfg, + }) + if err != nil { + return nil, fmt.Errorf("failed to create decoder: %v", err) + } + err = decoder.Decode(conf) + if err != nil { + return nil, fmt.Errorf("failed to parse config: %v", err) + } + return newManagerPlugin(name, builder, cfg) + } + plugins.RegisterPluginBuilder(plugins.StorageManagerPlugin, name, f) +} + +func newManagerPlugin(name string, builder ManagerBuilder, cfg *Config) (plugins.Plugin, error) { + if name == "" || builder == nil { + return nil, fmt.Errorf("storage manager plugin's name and builder cannot
be nil") + } + + instant, err := builder(cfg) + if err != nil { + return nil, fmt.Errorf("failed to init storage manager %s: %v", name, err) + } + + return &managerPlugin{ + name: name, + instance: instant, + }, nil +} - ReadPieceMetaRecords(context.Context, string) ([]*PieceMetaRecord, error) +// Get a storage manager from manager with specified name. +func Get(name string) (Manager, error) { + v := plugins.GetPlugin(plugins.StorageManagerPlugin, strings.ToLower(name)) + if v == nil { + return nil, fmt.Errorf("storage manager: %s not existed", name) + } + if plugin, ok := v.(*managerPlugin); ok { + return plugin.instance, nil + } + return nil, fmt.Errorf("get store manager %s storage error: unknown reason", name) +} - DeleteTask(context.Context, string) error +type Config struct { + GCInitialDelay time.Duration `yaml:"gcInitialDelay"` + GCInterval time.Duration `yaml:"gcInterval"` + DriverConfigs map[string]*DriverConfig `yaml:"driverConfigs"` +} - SetTaskMgr(mgr.SeedTaskMgr) +type DriverConfig struct { + GCConfig *GCConfig `yaml:"gcConfig"` +} - InitializeCleaners() +// GcConfig +type GCConfig struct { + YoungGCThreshold unit.Bytes `yaml:"youngGCThreshold"` + FullGCThreshold unit.Bytes `yaml:"fullGCThreshold"` + CleanRatio int `yaml:"cleanRatio"` + IntervalThreshold time.Duration `yaml:"intervalThreshold"` } diff --git a/cdnsystem/daemon/mgr/gc/manager.go b/cdnsystem/daemon/mgr/gc/manager.go index cce8b2d57..32c51a01c 100644 --- a/cdnsystem/daemon/mgr/gc/manager.go +++ b/cdnsystem/daemon/mgr/gc/manager.go @@ -21,7 +21,6 @@ import ( "sync" "time" - "d7y.io/dragonfly/v2/cdnsystem/config" "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr" logger "d7y.io/dragonfly/v2/pkg/dflog" ) @@ -46,6 +45,7 @@ var ( gcExecutorWrappers = make(map[string]*ExecutorWrapper) ) +// Register a gc task func Register(name string, gcInitialDelay time.Duration, gcInterval time.Duration, gcExecutor Executor) { gcExecutorWrappers[name] = &ExecutorWrapper{ gcInitialDelay: gcInitialDelay, @@ -56,7 
+56,6 @@ func Register(name string, gcInitialDelay time.Duration, gcInterval time.Duratio // Manager is an implementation of the interface of GCMgr. type Manager struct { - cfg *config.Config taskMgr mgr.SeedTaskMgr cdnMgr mgr.CDNMgr } @@ -75,9 +74,8 @@ func (gcm *Manager) GCTask(ctx context.Context, taskID string, full bool) error } // NewManager returns a new Manager. -func NewManager(cfg *config.Config, taskMgr mgr.SeedTaskMgr, cdnMgr mgr.CDNMgr) (*Manager, error) { +func NewManager(taskMgr mgr.SeedTaskMgr, cdnMgr mgr.CDNMgr) (*Manager, error) { return &Manager{ - cfg: cfg, taskMgr: taskMgr, cdnMgr: cdnMgr, }, nil @@ -103,7 +101,9 @@ func (gcm *Manager) StartGC(ctx context.Context) error { logger.Infof("exit %s gc task", name) return case <-ticker.C: - wrapper.gcExecutor.GC(ctx) + if err := wrapper.gcExecutor.GC(ctx); err != nil { + logger.Errorf("%s gc task execute failed: %v", name, err) + } } } }(name, executorWrapper) diff --git a/cdnsystem/daemon/mgr/gc_mgr.go b/cdnsystem/daemon/mgr/gc_mgr.go index 7acdafcf8..66110637e 100644 --- a/cdnsystem/daemon/mgr/gc_mgr.go +++ b/cdnsystem/daemon/mgr/gc_mgr.go @@ -21,7 +21,7 @@ import "context" // GCMgr as an interface defines all operations about gc operation. type GCMgr interface { - // StartGC starts to execute GC with a new goroutine. + // StartGC starts to execute GC StartGC(ctx context.Context) error // GCTask is used to do the gc task job with specified taskID. 
diff --git a/cdnsystem/daemon/mgr/progress/manager.go b/cdnsystem/daemon/mgr/progress/manager.go index 63b57981b..e3917d26c 100644 --- a/cdnsystem/daemon/mgr/progress/manager.go +++ b/cdnsystem/daemon/mgr/progress/manager.go @@ -22,7 +22,6 @@ import ( "sync" "time" - "d7y.io/dragonfly/v2/cdnsystem/config" "d7y.io/dragonfly/v2/cdnsystem/daemon/mgr" "d7y.io/dragonfly/v2/cdnsystem/types" "d7y.io/dragonfly/v2/pkg/dferrors" @@ -39,7 +38,6 @@ func init() { } type Manager struct { - cfg *config.Config seedSubscribers *syncmap.SyncMap taskPieceMetaRecords *syncmap.SyncMap taskMgr mgr.SeedTaskMgr @@ -52,9 +50,8 @@ func (pm *Manager) SetTaskMgr(taskMgr mgr.SeedTaskMgr) { pm.taskMgr = taskMgr } -func NewManager(cfg *config.Config) (*Manager, error) { +func NewManager() (mgr.SeedProgressMgr, error) { return &Manager{ - cfg: cfg, seedSubscribers: syncmap.NewSyncMap(), taskPieceMetaRecords: syncmap.NewSyncMap(), mu: synclock.NewLockerPool(), @@ -106,7 +103,7 @@ func (pm *Manager) WatchSeedProgress(ctx context.Context, taskID string) (<-chan case <-time.After(pm.timeout): } } - if task, err := pm.taskMgr.Get(ctx, taskID); err == nil && task.IsDone() { + if task, err := pm.taskMgr.Get(taskID); err == nil && task.IsDone() { chanList.Remove(ele) close(seedCh) } @@ -121,7 +118,7 @@ func (pm *Manager) PublishPiece(ctx context.Context, taskID string, record *type defer pm.mu.UnLock(taskID, false) err := pm.setPieceMetaRecord(taskID, record) if err != nil { - errors.Wrap(err, "failed to set piece meta record") + return errors.Wrap(err, "failed to set piece meta record") } chanList, err := pm.seedSubscribers.GetAsList(taskID) if err != nil { diff --git a/cdnsystem/daemon/mgr/progress_mgr.go b/cdnsystem/daemon/mgr/progress_mgr.go index 8d7ff495c..bb498ada2 100644 --- a/cdnsystem/daemon/mgr/progress_mgr.go +++ b/cdnsystem/daemon/mgr/progress_mgr.go @@ -40,6 +40,8 @@ type SeedProgressMgr interface { // GetPieces get pieces by taskID GetPieces(context.Context, string) (records 
[]*types.SeedPiece, err error) - // Clear + // Clear meta info of task Clear(context.Context, string) error + + SetTaskMgr(taskMgr SeedTaskMgr) } diff --git a/cdnsystem/daemon/mgr/task/manager.go b/cdnsystem/daemon/mgr/task/manager.go index c242d22b2..4da75ac49 100644 --- a/cdnsystem/daemon/mgr/task/manager.go +++ b/cdnsystem/daemon/mgr/task/manager.go @@ -35,12 +35,9 @@ import ( "github.com/pkg/errors" ) -func init() { - // Ensure that Manager implements the SeedTaskMgr and gcExecutor interfaces - var manager *Manager = nil - var _ mgr.SeedTaskMgr = manager - var _ gc.Executor = manager -} +// Ensure that Manager implements the SeedTaskMgr and gcExecutor interfaces +var _ mgr.SeedTaskMgr = (*Manager)(nil) +var _ gc.Executor = (*Manager)(nil) // Manager is an implementation of the interface of TaskMgr. type Manager struct { @@ -54,8 +51,7 @@ type Manager struct { } // NewManager returns a new Manager Object. -func NewManager(cfg *config.Config, cdnMgr mgr.CDNMgr, progressMgr mgr.SeedProgressMgr, - resourceClient source.ResourceClient) (*Manager, error) { +func NewManager(cfg *config.Config, cdnMgr mgr.CDNMgr, progressMgr mgr.SeedProgressMgr, resourceClient source.ResourceClient) (*Manager, error) { taskMgr := &Manager{ cfg: cfg, taskStore: syncmap.NewSyncMap(), @@ -65,6 +61,7 @@ func NewManager(cfg *config.Config, cdnMgr mgr.CDNMgr, progressMgr mgr.SeedProgr cdnMgr: cdnMgr, progressMgr: progressMgr, } + progressMgr.SetTaskMgr(taskMgr) gc.Register("task", cfg.GCInitialDelay, cfg.GCMetaInterval, taskMgr) return taskMgr, nil } @@ -125,7 +122,12 @@ func (tm *Manager) triggerCdnSyncAction(ctx context.Context, task *types.SeedTas if err != nil { logger.WithTaskID(task.TaskID).Errorf("trigger cdn get error: %v", err) } - go tm.progressMgr.PublishTask(ctx, task.TaskID, updateTaskInfo) + go func() { + if err := tm.progressMgr.PublishTask(ctx, task.TaskID, updateTaskInfo); err != nil { + logger.WithTaskID(task.TaskID).Errorf("failed to publish task: %v", err) + } + + }() 
updatedTask, err = tm.updateTask(task.TaskID, updateTaskInfo) if err != nil { logger.WithTaskID(task.TaskID).Errorf("failed to update task:%v", err) @@ -158,11 +160,11 @@ func (tm *Manager) getTask(taskID string) (*types.SeedTask, error) { return nil, errors.Wrapf(cdnerrors.ErrConvertFailed, "origin object: %+v", v) } -func (tm Manager) Get(ctx context.Context, taskID string) (*types.SeedTask, error) { +func (tm Manager) Get(taskID string) (*types.SeedTask, error) { return tm.getTask(taskID) } -func (tm Manager) GetAccessTime(ctx context.Context) (*syncmap.SyncMap, error) { +func (tm Manager) GetAccessTime() (*syncmap.SyncMap, error) { return tm.accessTimeMap, nil } @@ -191,7 +193,7 @@ func (tm *Manager) GC(ctx context.Context) error { var removedTaskCount int startTime := time.Now() // get all taskIDs and the corresponding accessTime - taskAccessMap, err := tm.GetAccessTime(ctx) + taskAccessMap, err := tm.GetAccessTime() if err != nil { return fmt.Errorf("gc tasks: failed to get task accessTime map for GC: %v", err) } diff --git a/cdnsystem/daemon/mgr/task_mgr.go b/cdnsystem/daemon/mgr/task_mgr.go index 402454e1e..c0ab4148e 100644 --- a/cdnsystem/daemon/mgr/task_mgr.go +++ b/cdnsystem/daemon/mgr/task_mgr.go @@ -32,10 +32,10 @@ type SeedTaskMgr interface { Register(context.Context, *types.TaskRegisterRequest) (pieceCh <-chan *types.SeedPiece, err error) // Get get task Info with specified taskId. - Get(context.Context, string) (*types.SeedTask, error) + Get(string) (*types.SeedTask, error) // GetAccessTime get all tasks accessTime. - GetAccessTime(context.Context) (*syncmap.SyncMap, error) + GetAccessTime() (*syncmap.SyncMap, error) // Delete delete a task. 
Delete(context.Context, string) error diff --git a/cdnsystem/plugins/plugin_manager.go b/cdnsystem/plugins/plugin_manager.go index c9dbbe8dd..cf602e950 100644 --- a/cdnsystem/plugins/plugin_manager.go +++ b/cdnsystem/plugins/plugin_manager.go @@ -18,8 +18,6 @@ package plugins import ( "sync" - - "d7y.io/dragonfly/v2/cdnsystem/config" ) // NewManager creates a default plugin manager instant. @@ -33,35 +31,35 @@ func NewManager() Manager { // NewRepository creates a default repository instant. func NewRepository() Repository { return &repositoryIml{ - repos: make(map[config.PluginType]*sync.Map), + repos: make(map[PluginType]*sync.Map), } } // Manager manages all plugin builders and plugin instants. type Manager interface { // GetBuilder adds a Builder object with the giving plugin type and name. - AddBuilder(pt config.PluginType, name string, b Builder) + AddBuilder(pt PluginType, name string, b Builder) // GetBuilder returns a Builder object with the giving plugin type and name. - GetBuilder(pt config.PluginType, name string) Builder + GetBuilder(pt PluginType, name string) Builder // DeleteBuilder deletes a builder with the giving plugin type and name. - DeleteBuilder(pt config.PluginType, name string) + DeleteBuilder(pt PluginType, name string) // AddPlugin adds a plugin into this manager. AddPlugin(p Plugin) // GetPlugin returns a plugin with the giving plugin type and name. - GetPlugin(pt config.PluginType, name string) Plugin + GetPlugin(pt PluginType, name string) Plugin // DeletePlugin deletes a plugin with the giving plugin type and name. - DeletePlugin(pt config.PluginType, name string) + DeletePlugin(pt PluginType, name string) } // Plugin defines methods that plugins need to implement. type Plugin interface { // Type returns the type of this plugin. - Type() config.PluginType + Type() PluginType // Name returns the name of this plugin. 
Name() string @@ -73,15 +71,15 @@ type Builder func(conf interface{}) (Plugin, error) // Repository stores data related to plugin. type Repository interface { // Add adds a data to this repository. - Add(pt config.PluginType, name string, data interface{}) + Add(pt PluginType, name string, data interface{}) // Get gets a data with the giving type and name from this // repository. - Get(pt config.PluginType, name string) interface{} + Get(pt PluginType, name string) interface{} // Delete deletes a data with the giving type and name from // this repository. - Delete(pt config.PluginType, name string) + Delete(pt PluginType, name string) } // ----------------------------------------------------------------------------- @@ -94,14 +92,14 @@ type managerIml struct { var _ Manager = (*managerIml)(nil) -func (m *managerIml) AddBuilder(pt config.PluginType, name string, b Builder) { +func (m *managerIml) AddBuilder(pt PluginType, name string, b Builder) { if b == nil { return } m.builders.Add(pt, name, b) } -func (m *managerIml) GetBuilder(pt config.PluginType, name string) Builder { +func (m *managerIml) GetBuilder(pt PluginType, name string) Builder { data := m.builders.Get(pt, name) if data == nil { return nil @@ -112,7 +110,7 @@ func (m *managerIml) GetBuilder(pt config.PluginType, name string) Builder { return nil } -func (m *managerIml) DeleteBuilder(pt config.PluginType, name string) { +func (m *managerIml) DeleteBuilder(pt PluginType, name string) { m.builders.Delete(pt, name) } @@ -123,7 +121,7 @@ func (m *managerIml) AddPlugin(p Plugin) { m.plugins.Add(p.Type(), p.Name(), p) } -func (m *managerIml) GetPlugin(pt config.PluginType, name string) Plugin { +func (m *managerIml) GetPlugin(pt PluginType, name string) Plugin { data := m.plugins.Get(pt, name) if data == nil { return nil @@ -134,7 +132,7 @@ func (m *managerIml) GetPlugin(pt config.PluginType, name string) Plugin { return nil } -func (m *managerIml) DeletePlugin(pt config.PluginType, name string) { +func (m 
*managerIml) DeletePlugin(pt PluginType, name string) { m.plugins.Delete(pt, name) } @@ -142,13 +140,13 @@ func (m *managerIml) DeletePlugin(pt config.PluginType, name string) { // implementation of Repository type repositoryIml struct { - repos map[config.PluginType]*sync.Map + repos map[PluginType]*sync.Map lock sync.Mutex } var _ Repository = (*repositoryIml)(nil) -func (r *repositoryIml) Add(pt config.PluginType, name string, data interface{}) { +func (r *repositoryIml) Add(pt PluginType, name string, data interface{}) { if data == nil || !validate(pt, name) { return } @@ -157,7 +155,7 @@ func (r *repositoryIml) Add(pt config.PluginType, name string, data interface{}) m.Store(name, data) } -func (r *repositoryIml) Get(pt config.PluginType, name string) interface{} { +func (r *repositoryIml) Get(pt PluginType, name string) interface{} { if !validate(pt, name) { return nil } @@ -169,7 +167,7 @@ func (r *repositoryIml) Get(pt config.PluginType, name string) interface{} { return nil } -func (r *repositoryIml) Delete(pt config.PluginType, name string) { +func (r *repositoryIml) Delete(pt PluginType, name string) { if !validate(pt, name) { return } @@ -177,7 +175,7 @@ func (r *repositoryIml) Delete(pt config.PluginType, name string) { m.Delete(name) } -func (r *repositoryIml) getRepo(pt config.PluginType) *sync.Map { +func (r *repositoryIml) getRepo(pt PluginType) *sync.Map { var ( m *sync.Map ok bool @@ -198,12 +196,12 @@ func (r *repositoryIml) getRepo(pt config.PluginType) *sync.Map { // ----------------------------------------------------------------------------- // helper functions -func validate(pt config.PluginType, name string) bool { +func validate(pt PluginType, name string) bool { if name == "" { return false } - for i := len(config.PluginTypes) - 1; i >= 0; i-- { - if pt == config.PluginTypes[i] { + for i := len(PluginTypes) - 1; i >= 0; i-- { + if pt == PluginTypes[i] { return true } } diff --git a/cdnsystem/config/plugin_type.go 
b/cdnsystem/plugins/plugin_type.go similarity index 70% rename from cdnsystem/config/plugin_type.go rename to cdnsystem/plugins/plugin_type.go index de7d2acb5..4f95e4743 100644 --- a/cdnsystem/config/plugin_type.go +++ b/cdnsystem/plugins/plugin_type.go @@ -14,22 +14,22 @@ * limitations under the License. */ -package config +package plugins // PluginType defines the type of plugin. type PluginType string const ( - // StoragePlugin the storage plugin type. - StoragePlugin = PluginType("storage") - - // SourceClientPlugin the source client plugin type - SourceClientPlugin = PluginType("sourceClient") + // StorageDriverPlugin the storage driver plugin type. + StorageDriverPlugin = PluginType("storagedriver") + // StorageManagerPlugin the storage manager plugin type + StorageManagerPlugin = PluginType("storagemanager") ) // PluginTypes explicitly stores all available plugin types. var PluginTypes = []PluginType{ + // The order here is very important and represents the dependency loading order of the plugins. Driver plugins should be loaded before manager plugins. - StoragePlugin, SourceClientPlugin, + StorageDriverPlugin, StorageManagerPlugin, } // PluginProperties the properties of a plugin. diff --git a/cdnsystem/plugins/plugins.go b/cdnsystem/plugins/plugins.go index 6c3facdb3..287b39881 100644 --- a/cdnsystem/plugins/plugins.go +++ b/cdnsystem/plugins/plugins.go @@ -19,7 +19,6 @@ package plugins import ( "fmt" - "d7y.io/dragonfly/v2/cdnsystem/config" logger "d7y.io/dragonfly/v2/pkg/dflog" ) @@ -31,9 +30,10 @@ func SetManager(m Manager) { } // Initialize builds all plugins defined in config file.
-func Initialize(cfg *config.Config) error { - for pt, value := range cfg.Plugins { - for _, v := range value { +func Initialize(plugins map[PluginType][]*PluginProperties) error { + // TODO: handle plugin load-sequence dependencies + for _, pt := range PluginTypes { + for _, v := range plugins[pt] { if !v.Enable { logger.Infof("plugin[%s][%s] is disabled", pt, v.Name) continue @@ -55,13 +55,13 @@ func Initialize(cfg *config.Config) error { return nil } -// RegisterPlugin register a plugin builder that will be called to create a new +// RegisterPluginBuilder registers a plugin builder that will be called to create a new // plugin instant when cdn starts. -func RegisterPlugin(pt config.PluginType, name string, builder Builder) { +func RegisterPluginBuilder(pt PluginType, name string, builder Builder) { mgr.AddBuilder(pt, name, builder) } // GetPlugin returns a plugin instant with the giving plugin type and name. -func GetPlugin(pt config.PluginType, name string) Plugin { +func GetPlugin(pt PluginType, name string) Plugin { return mgr.GetPlugin(pt, name) } diff --git a/cdnsystem/plugins/plugins_test.go b/cdnsystem/plugins/plugins_test.go index 962adfab8..37ba28899 100644 --- a/cdnsystem/plugins/plugins_test.go +++ b/cdnsystem/plugins/plugins_test.go @@ -17,11 +17,9 @@ package plugins import ( - "fmt" "reflect" "testing" - "d7y.io/dragonfly/v2/cdnsystem/config" "github.com/stretchr/testify/suite" ) @@ -55,56 +53,7 @@ func (s *PluginsTestSuite) TestSetManager() { // ----------------------------------------------------------------------------- func (s *PluginsTestSuite) TestInitialize() { - var testCase = func(cfg *config.Config, b Builder, - pt config.PluginType, name string, hasPlugin bool, errMsg string) { - SetManager(NewManager()) - RegisterPlugin(pt, name, b) - err := Initialize(cfg) - plugin := GetPlugin(pt, name) - - if errMsg != "" { - s.NotNil(err) - s.EqualError(err, ".*"+errMsg+".*") - s.Nil(plugin) - } else { - s.Nil(err) - if hasPlugin { - s.Equal(plugin.Type(),
pt) - s.Equal(plugin.Name(), name) - } else { - s.Nil(plugin) - } - } - } - var testFunc = func(pt config.PluginType) { - errMsg := "build error" - name := "test" - var createBuilder = func(err bool) Builder { - return func(conf interface{}) (plugin Plugin, e error) { - if err { - return nil, fmt.Errorf(errMsg) - } - return &mockPlugin{pt, name}, nil - } - } - var createConf = func(enabled bool) *config.Config { - plugins := make(map[config.PluginType][]*config.PluginProperties) - plugins[pt] = []*config.PluginProperties{{Name: name, Enable: enabled}} - return &config.Config{Plugins: plugins} - } - testCase(createConf(false), createBuilder(false), - pt, name, false, "") - testCase(createConf(true), nil, - pt, name, false, "cannot find builder") - testCase(createConf(true), createBuilder(true), - pt, name, false, errMsg) - testCase(createConf(true), createBuilder(false), - pt, name, true, "") - } - for _, pt := range config.PluginTypes { - testFunc(pt) - } } func (s *PluginsTestSuite) TestManagerIml_Builder() { @@ -113,7 +62,7 @@ func (s *PluginsTestSuite) TestManagerIml_Builder() { } manager := NewManager() - var testFunc = func(pt config.PluginType, name string, b Builder, result bool) { + var testFunc = func(pt PluginType, name string, b Builder, result bool) { manager.AddBuilder(pt, name, b) obj := manager.GetBuilder(pt, name) if result { @@ -127,8 +76,8 @@ func (s *PluginsTestSuite) TestManagerIml_Builder() { } } - testFunc(config.PluginType("test"), "test", builder, false) - for _, pt := range config.PluginTypes { + testFunc(PluginType("test"), "test", builder, false) + for _, pt := range PluginTypes { testFunc(pt, "test", builder, true) testFunc(pt, "", nil, false) testFunc(pt, "", builder, false) @@ -152,7 +101,7 @@ func (s *PluginsTestSuite) TestManagerIml_Plugin() { } testFunc(&mockPlugin{"test", "test"}, false) - for _, pt := range config.PluginTypes { + for _, pt := range PluginTypes { testFunc(&mockPlugin{pt, "test"}, true) testFunc(&mockPlugin{pt, ""}, 
false) } @@ -160,15 +109,15 @@ func (s *PluginsTestSuite) TestManagerIml_Plugin() { func (s *PluginsTestSuite) TestRepositoryIml() { type testCase struct { - pt config.PluginType + pt PluginType name string data interface{} addResult bool } var createCase = func(validPlugin bool, name string, data interface{}, result bool) testCase { - pt := config.StoragePlugin + pt := StorageDriverPlugin if !validPlugin { - pt = config.PluginType("test-validPlugin") + pt = PluginType("test-validPlugin") } return testCase{ pt: pt, @@ -208,15 +157,15 @@ func (s *PluginsTestSuite) TestRepositoryIml() { func (s *PluginsTestSuite) TestValidate() { type testCase struct { - pt config.PluginType + pt PluginType name string expected bool } var cases = []testCase{ - {config.PluginType("test"), "", false}, - {config.PluginType("test"), "test", false}, + {PluginType("test"), "", false}, + {PluginType("test"), "test", false}, } - for _, pt := range config.PluginTypes { + for _, pt := range PluginTypes { cases = append(cases, testCase{pt, "", false}, testCase{pt, "test", true}, @@ -230,11 +179,11 @@ func (s *PluginsTestSuite) TestValidate() { // ----------------------------------------------------------------------------- type mockPlugin struct { - pt config.PluginType + pt PluginType name string } -func (m *mockPlugin) Type() config.PluginType { +func (m *mockPlugin) Type() PluginType { return m.pt } diff --git a/cdnsystem/server/server.go b/cdnsystem/server/server.go index 23f00dc33..5e5b533ba 100644 --- a/cdnsystem/server/server.go +++ b/cdnsystem/server/server.go @@ -58,40 +58,39 @@ type Server struct { // New creates a brand new server instance. 
func New(cfg *config.Config) (*Server, error) { - if err := plugins.Initialize(cfg); err != nil { + if err := plugins.Initialize(cfg.Plugins); err != nil { return nil, err } - storageMgr, err := storage.NewManager(cfg) - if err != nil { - return nil, errors.Wrapf(err, "failed to create storage manager") - } + // source client sourceClient, err := source.NewSourceClient() if err != nil { return nil, errors.Wrapf(err, "failed to create source client") } // progress manager - progressMgr, err := progress.NewManager(cfg) + progressMgr, err := progress.NewManager() if err != nil { return nil, errors.Wrapf(err, "failed to create progress manager") } + // storage manager + storageMgr, err := storage.Get(cfg.StorageMode) + if err != nil { + return nil, errors.Wrapf(err, "failed to create storage manager") + } // cdn manager cdnMgr, err := cdn.NewManager(cfg, storageMgr, progressMgr, sourceClient) if err != nil { return nil, errors.Wrapf(err, "failed to create cdn manager") } - // task manager taskMgr, err := task.NewManager(cfg, cdnMgr, progressMgr, sourceClient) if err != nil { return nil, errors.Wrapf(err, "failed to create task manager") } - storageMgr.SetTaskMgr(taskMgr) - storageMgr.InitializeCleaners() - progressMgr.SetTaskMgr(taskMgr) + storageMgr.Initialize(taskMgr) // gc manager - gcMgr, err := gc.NewManager(cfg, taskMgr, cdnMgr) + gcMgr, err := gc.NewManager(taskMgr, cdnMgr) if err != nil { return nil, errors.Wrapf(err, "failed to create gc manager") } diff --git a/cdnsystem/server/service/cdn_seed_server.go b/cdnsystem/server/service/cdn_seed_server.go index acf3b6afb..9ccceee26 100644 --- a/cdnsystem/server/service/cdn_seed_server.go +++ b/cdnsystem/server/service/cdn_seed_server.go @@ -110,9 +110,9 @@ func (css *CdnSeedServer) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRe if err != nil { return dferrors.Newf(dfcodes.CdnTaskRegistryFail, "failed to register seed task(%s):%v", req.TaskId, err) } - task, err := css.taskMgr.Get(ctx, req.TaskId) + task, 
err := css.taskMgr.Get(req.TaskId) if err != nil { - return err + return dferrors.Newf(dfcodes.CdnError, "failed to get task(%s): %v", req.TaskId, err) } peerID := cdnutil.GenCDNPeerID(req.TaskId) for piece := range pieceChan { @@ -132,9 +132,6 @@ func (css *CdnSeedServer) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRe } } - if err != nil { - return dferrors.Newf(dfcodes.CdnError, "failed to get task(%s): %v", req.TaskId, err) - } if task.CdnStatus != types.TaskInfoCdnStatusSuccess { return dferrors.Newf(dfcodes.CdnTaskDownloadFail, "task(%s) status error , status: %s", req.TaskId, task.CdnStatus) } @@ -159,7 +156,7 @@ func (css *CdnSeedServer) GetPieceTasks(ctx context.Context, req *base.PieceTask if err := checkPieceTasksRequestParams(req); err != nil { return nil, dferrors.Newf(dfcodes.BadRequest, "failed to validate seed request for task(%s): %v", req.TaskId, err) } - task, err := css.taskMgr.Get(ctx, req.TaskId) + task, err := css.taskMgr.Get(req.TaskId) logger.Debugf("task:%+v", task) if err != nil { if cdnerrors.IsDataNotFound(err) { diff --git a/cdnsystem/storedriver/driver.go b/cdnsystem/storedriver/driver.go index 71ccdd37b..038b3cd8a 100644 --- a/cdnsystem/storedriver/driver.go +++ b/cdnsystem/storedriver/driver.go @@ -17,12 +17,16 @@ package storedriver import ( - "context" + "fmt" "io" "path/filepath" "time" + "d7y.io/dragonfly/v2/cdnsystem/cdnerrors" + "d7y.io/dragonfly/v2/cdnsystem/plugins" "d7y.io/dragonfly/v2/pkg/unit" + "d7y.io/dragonfly/v2/pkg/util/stringutils" + "github.com/pkg/errors" ) // Driver defines an interface to manage the data stored in the driver. @@ -36,47 +40,47 @@ type Driver interface { // Get data from the storage based on raw information. // If the length<=0, the driver should return all data from the raw.offset. // Otherwise, just return the data which starts from raw.offset and the length is raw.length. 
- Get(ctx context.Context, raw *Raw) (io.ReadCloser, error) + Get(raw *Raw) (io.ReadCloser, error) // Get data from the storage based on raw information. // The data should be returned in bytes. // If the length<=0, the storage driver should return all data from the raw.offset. // Otherwise, just return the data which starts from raw.offset and the length is raw.length. - GetBytes(ctx context.Context, raw *Raw) ([]byte, error) + GetBytes(raw *Raw) ([]byte, error) // Put the data into the storage with raw information. // The storage will get data from io.Reader as io stream. // If the offset>0, the storage driver should starting at byte raw.offset off. - Put(ctx context.Context, raw *Raw, data io.Reader) error + Put(raw *Raw, data io.Reader) error // PutBytes puts the data into the storage with raw information. // The data is passed in bytes. // If the offset>0, the storage driver should starting at byte raw.offset off. - PutBytes(ctx context.Context, raw *Raw, data []byte) error + PutBytes(raw *Raw, data []byte) error // Remove the data from the storage based on raw information. - Remove(ctx context.Context, raw *Raw) error + Remove(raw *Raw) error // Stat determines whether the data exists based on raw information. // If that, and return some info that in the form of struct StorageInfo. // If not, return the ErrFileNotExist. - Stat(ctx context.Context, raw *Raw) (*StorageInfo, error) + Stat(raw *Raw) (*StorageInfo, error) // GetAvailSpace returns the available disk space in B. 
- GetAvailSpace(ctx context.Context) (unit.Bytes, error) + GetAvailSpace() (unit.Bytes, error) // GetTotalAndFreeSpace - GetTotalAndFreeSpace(ctx context.Context) (unit.Bytes, unit.Bytes, error) + GetTotalAndFreeSpace() (unit.Bytes, unit.Bytes, error) // GetTotalSpace - GetTotalSpace(ctx context.Context) (unit.Bytes, error) + GetTotalSpace() (unit.Bytes, error) // Walk walks the file tree rooted at root which determined by raw.Bucket and raw.Key, // calling walkFn for each file or directory in the tree, including root. - Walk(ctx context.Context, raw *Raw) error + Walk(raw *Raw) error // CreateBaseDir - CreateBaseDir(ctx context.Context) error + CreateBaseDir() error // GetPath GetPath(raw *Raw) string @@ -85,13 +89,14 @@ type Driver interface { MoveFile(src string, dst string) error // Exits - Exits(ctx context.Context, raw *Raw) bool + Exits(raw *Raw) bool // GetHomePath - GetHomePath(ctx context.Context) string + GetHomePath() string +} - // GetGcConfig - GetGcConfig(ctx context.Context) *GcConfig +type Config struct { + BaseDir string `yaml:"baseDir"` } // Raw identifies a piece of data uniquely. @@ -115,10 +120,150 @@ type StorageInfo struct { ModTime time.Time // modified time } -// GcConfig -type GcConfig struct { - YoungGCThreshold unit.Bytes `yaml:"youngGCThreshold"` - FullGCThreshold unit.Bytes `yaml:"fullGCThreshold"` - CleanRatio int `yaml:"cleanRatio"` - IntervalThreshold time.Duration `yaml:"intervalThreshold"` +// driverPlugin is a wrapper of the storage driver which implements the interface of Driver. +type driverPlugin struct { + // name is a unique identifier, you can also name it ID. + name string + // instance holds a storage which implements the interface of driverPlugin. 
+ instance Driver +} + +// Ensure that driverPlugin implements the interface of Driver +var _ Driver = (*driverPlugin)(nil) + +// Ensure that driverPlugin implements the interface plugins.Plugin +var _ plugins.Plugin = (*driverPlugin)(nil) + +// NewDriverPlugin creates a new storage Driver Plugin instance. +func newDriverPlugin(name string, builder DriverBuilder, cfg *Config) (plugins.Plugin, error) { + if name == "" || builder == nil { + return nil, fmt.Errorf("storage driver plugin's name and builder cannot be nil") + } + // init driver with specific config + driver, err := builder(cfg) + if err != nil { + return nil, fmt.Errorf("failed to init storage driver %s: %v", name, err) + } + + return &driverPlugin{ + name: name, + instance: driver, + }, nil +} + +// Type returns the plugin type StorageDriverPlugin. +func (s *driverPlugin) Type() plugins.PluginType { + return plugins.StorageDriverPlugin +} + +// Name returns the plugin name. +func (s *driverPlugin) Name() string { + return s.name +} + +// GetTotalSpace +func (s *driverPlugin) GetTotalSpace() (unit.Bytes, error) { + return s.instance.GetTotalSpace() +} + +// CreateBaseDir +func (s *driverPlugin) CreateBaseDir() error { + return s.instance.CreateBaseDir() +} + +func (s *driverPlugin) Exits(raw *Raw) bool { + return s.instance.Exits(raw) +} + +func (s *driverPlugin) GetTotalAndFreeSpace() (unit.Bytes, unit.Bytes, error) { + return s.instance.GetTotalAndFreeSpace() +} + +// Get the data from the storage driver in io stream. +func (s *driverPlugin) Get(raw *Raw) (io.ReadCloser, error) { + if err := checkEmptyKey(raw); err != nil { + return nil, err + } + return s.instance.Get(raw) +} + +// GetBytes gets the data from the storage driver in bytes. +func (s *driverPlugin) GetBytes(raw *Raw) ([]byte, error) { + if err := checkEmptyKey(raw); err != nil { + return nil, err + } + return s.instance.GetBytes(raw) +} + +// Put puts data into the storage in io stream. 
+func (s *driverPlugin) Put(raw *Raw, data io.Reader) error { + if err := checkEmptyKey(raw); err != nil { + return err + } + return s.instance.Put(raw, data) +} + +// PutBytes puts data into the storage in bytes. +func (s *driverPlugin) PutBytes(raw *Raw, data []byte) error { + if err := checkEmptyKey(raw); err != nil { + return err + } + return s.instance.PutBytes(raw, data) +} + +// AppendBytes append data into storage in bytes. +//func (s *Store) AppendBytes(ctx context.Context, raw *Raw, data []byte) error { +// if err := checkEmptyKey(raw); err != nil { +// return err +// } +// return s.driver.AppendBytes(ctx, raw, data) +//} + +// Remove the data from the storage based on raw information. +func (s *driverPlugin) Remove(raw *Raw) error { + if raw == nil || (stringutils.IsBlank(raw.Key) && + stringutils.IsBlank(raw.Bucket)) { + return errors.Wrapf(cdnerrors.ErrInvalidValue, "cannot set both key and bucket empty at the same time") + } + return s.instance.Remove(raw) +} + +// Stat determines whether the data exists based on raw information. +// If that, and return some info that in the form of struct StorageInfo. +// If not, return the ErrNotFound. +func (s *driverPlugin) Stat(raw *Raw) (*StorageInfo, error) { + if err := checkEmptyKey(raw); err != nil { + return nil, err + } + return s.instance.Stat(raw) +} + +// Walk walks the file tree rooted at root which determined by raw.Bucket and raw.Key, +// calling walkFn for each file or directory in the tree, including root. +func (s *driverPlugin) Walk(raw *Raw) error { + return s.instance.Walk(raw) +} + +func (s *driverPlugin) GetPath(raw *Raw) string { + return s.instance.GetPath(raw) +} + +func (s *driverPlugin) MoveFile(src string, dst string) error { + return s.instance.MoveFile(src, dst) +} + +// GetAvailSpace returns the available disk space in B. 
+func (s *driverPlugin) GetAvailSpace() (unit.Bytes, error) { + return s.instance.GetAvailSpace() +} + +func (s *driverPlugin) GetHomePath() string { + return s.instance.GetHomePath() +} + +func checkEmptyKey(raw *Raw) error { + if raw == nil || stringutils.IsBlank(raw.Key) { + return errors.Wrapf(cdnerrors.ErrInvalidValue, "raw key is empty") + } + return nil } diff --git a/cdnsystem/storedriver/local/local_driver.go b/cdnsystem/storedriver/local/local_driver.go index e7d840011..14fb17c94 100644 --- a/cdnsystem/storedriver/local/local_driver.go +++ b/cdnsystem/storedriver/local/local_driver.go @@ -17,119 +17,71 @@ package local import ( - "context" "fmt" "io" "io/ioutil" "os" "path/filepath" - "reflect" - "time" "d7y.io/dragonfly/v2/cdnsystem/cdnerrors" "d7y.io/dragonfly/v2/cdnsystem/storedriver" + logger "d7y.io/dragonfly/v2/pkg/dflog" "d7y.io/dragonfly/v2/pkg/synclock" "d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/util/fileutils" "d7y.io/dragonfly/v2/pkg/util/statutils" - "github.com/mitchellh/mapstructure" "github.com/pkg/errors" - "gopkg.in/yaml.v3" ) -func init() { - // Ensure that storage implements the StorageDriver interface - var storage *diskStorage = nil - var _ storedriver.Driver = storage -} - -const StorageDriver = "disk" +// Ensure driver implements the storedriver.Driver interface +var _ storedriver.Driver = (*driver)(nil) -const MemoryStorageDriver = "memory" +const ( + DiskDriverName = "disk" + MemoryDriverName = "memory" +) var fileLocker = synclock.NewLockerPool() func init() { - storedriver.Register(StorageDriver, NewStorage) - storedriver.Register(MemoryStorageDriver, NewStorage) + storedriver.Register(DiskDriverName, NewStorageDriver) + storedriver.Register(MemoryDriverName, NewStorageDriver) } -// diskStorage is one of the implementations of StorageDriver using local disk file system. -type diskStorage struct { +// driver is one of the implementations of storage Driver using local file system. 
+type driver struct { // BaseDir is the dir that local storage driver will store content based on it. BaseDir string - // GcConfig - GcConfig *storedriver.GcConfig } -// NewStorage performs initialization for disk Storage and return a StorageDriver. -func NewStorage(conf interface{}) (storedriver.Driver, error) { - cfg := &diskStorage{} - decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - DecodeHook: decodeHock( - reflect.TypeOf(time.Second), - reflect.TypeOf(unit.B)), - Result: cfg, - }) - if err != nil { - return nil, fmt.Errorf("failed to create decoder: %v", err) - } - err = decoder.Decode(conf) - if err != nil { - return nil, fmt.Errorf("failed to parse config: %v", err) - } - // prepare the base dir - if !filepath.IsAbs(cfg.BaseDir) { - return nil, fmt.Errorf("not absolute path: %s", cfg.BaseDir) - } - if err := fileutils.MkdirAll(cfg.BaseDir); err != nil { - return nil, fmt.Errorf("failed to create baseDir%s: %v", cfg.BaseDir, err) - } - - return &diskStorage{ - BaseDir: cfg.BaseDir, - GcConfig: cfg.GcConfig, +// NewStorageDriver performs initialization for disk Storage and return a storage Driver. 
+func NewStorageDriver(cfg *storedriver.Config) (storedriver.Driver, error) { + return &driver{ + BaseDir: cfg.BaseDir, }, nil } -func decodeHock(types ...reflect.Type) mapstructure.DecodeHookFunc { - return func(f, t reflect.Type, data interface{}) (interface{}, error) { - for _, typ := range types { - if t == typ { - b, _ := yaml.Marshal(data) - v := reflect.New(t) - return v.Interface(), yaml.Unmarshal(b, v.Interface()) - } - } - return data, nil - } -} - -func (ds *diskStorage) GetTotalSpace(ctx context.Context) (unit.Bytes, error) { +func (ds *driver) GetTotalSpace() (unit.Bytes, error) { path := ds.BaseDir lock(path, -1, true) defer unLock(path, -1, true) return fileutils.GetTotalSpace(path) } -func (ds *diskStorage) GetHomePath(ctx context.Context) string { +func (ds *driver) GetHomePath() string { return ds.BaseDir } -func (ds *diskStorage) GetGcConfig(ctx context.Context) *storedriver.GcConfig { - return ds.GcConfig -} - -func (ds *diskStorage) CreateBaseDir(ctx context.Context) error { +func (ds *driver) CreateBaseDir() error { return os.MkdirAll(ds.BaseDir, os.ModePerm) } -func (ds *diskStorage) MoveFile(src string, dst string) error { +func (ds *driver) MoveFile(src string, dst string) error { return fileutils.MoveFile(src, dst) } // Get the content of key from storage and return in io stream. 
-func (ds *diskStorage) Get(ctx context.Context, raw *storedriver.Raw) (io.ReadCloser, error) { +func (ds *driver) Get(raw *storedriver.Raw) (io.ReadCloser, error) { path, info, err := ds.statPath(raw.Bucket, raw.Key) if err != nil { return nil, err @@ -150,22 +102,30 @@ func (ds *diskStorage) Get(ctx context.Context, raw *storedriver.Raw) (io.ReadCl if err != nil { return } - defer f.Close() + defer func() { + if err := f.Close(); err != nil { + logger.Error("failed to close file %s: %v", f, err) + } + }() - f.Seek(raw.Offset, io.SeekStart) + if _, err := f.Seek(raw.Offset, io.SeekStart); err != nil { + logger.Errorf("failed to seek file %s: %v", f, err) + } var reader io.Reader reader = f if raw.Length > 0 { reader = io.LimitReader(f, raw.Length) } buf := make([]byte, 256*1024) - io.CopyBuffer(w, reader, buf) + if _, err := io.CopyBuffer(w, reader, buf); err != nil { + logger.Errorf("failed to copy buffer from file %s: %v", f, err) + } }(w) return r, nil } // GetBytes gets the content of key from storage and return in bytes. -func (ds *diskStorage) GetBytes(ctx context.Context, raw *storedriver.Raw) (data []byte, err error) { +func (ds *driver) GetBytes(raw *storedriver.Raw) (data []byte, err error) { path, info, err := ds.statPath(raw.Bucket, raw.Key) if err != nil { return nil, err @@ -182,9 +142,15 @@ func (ds *diskStorage) GetBytes(ctx context.Context, raw *storedriver.Raw) (data if err != nil { return nil, err } - defer f.Close() + defer func() { + if err := f.Close(); err != nil { + logger.Errorf("failed to close file %s: %v", f, err) + } + }() - f.Seek(raw.Offset, io.SeekStart) + if _, err := f.Seek(raw.Offset, io.SeekStart); err != nil { + return nil, err + } if raw.Length == 0 { data, err = ioutil.ReadAll(f) } else { @@ -198,7 +164,7 @@ func (ds *diskStorage) GetBytes(ctx context.Context, raw *storedriver.Raw) (data } // Put reads the content from reader and put it into storage. 
-func (ds *diskStorage) Put(ctx context.Context, raw *storedriver.Raw, data io.Reader) error { +func (ds *driver) Put(raw *storedriver.Raw, data io.Reader) error { if err := storedriver.CheckPutRaw(raw); err != nil { return err } @@ -220,22 +186,31 @@ func (ds *diskStorage) Put(ctx context.Context, raw *storedriver.Raw, data io.Re if err = storedriver.CheckTrunc(raw); err != nil { return err } - f, err = fileutils.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644) + if f, err = fileutils.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644); err != nil { + return err + } } else if raw.Append { - f, err = fileutils.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + if f, err = fileutils.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err != nil { + return err + } } else { - f, err = fileutils.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644) - } - if err != nil { - return err + if f, err = fileutils.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644); err != nil { + return err + } } - defer f.Close() + defer func() { + if err := f.Close(); err != nil { + logger.Errorf("failed to close file %s: %v", f, err) + } + }() if raw.Trunc { if err = f.Truncate(raw.TruncSize); err != nil { return err } } - f.Seek(raw.Offset, io.SeekStart) + if _, err := f.Seek(raw.Offset, io.SeekStart); err != nil { + return err + } if raw.Length > 0 { if _, err = io.CopyN(f, data, raw.Length); err != nil { return err @@ -252,7 +227,7 @@ func (ds *diskStorage) Put(ctx context.Context, raw *storedriver.Raw, data io.Re } // PutBytes puts the content of key from storage with bytes. 
-func (ds *diskStorage) PutBytes(ctx context.Context, raw *storedriver.Raw, data []byte) error { +func (ds *driver) PutBytes(raw *storedriver.Raw, data []byte) error { if err := storedriver.CheckPutRaw(raw); err != nil { return err } @@ -276,13 +251,19 @@ func (ds *diskStorage) PutBytes(ctx context.Context, raw *storedriver.Raw, data if err != nil { return err } - defer f.Close() + defer func() { + if err := f.Close(); err != nil { + logger.Errorf("failed to close file %s: %v", f, err) + } + }() if raw.Trunc { if err = f.Truncate(raw.TruncSize); err != nil { return err } } - f.Seek(raw.Offset, io.SeekStart) + if _, err := f.Seek(raw.Offset, io.SeekStart); err != nil { + return err + } if raw.Length > 0 { if _, err := f.Write(data[:raw.Length]); err != nil { return err @@ -295,40 +276,8 @@ func (ds *diskStorage) PutBytes(ctx context.Context, raw *storedriver.Raw, data return nil } -//// AppendBytes append the content to end of storage file. -//func (ds *diskStorage) AppendBytes(ctx context.Context, raw *store.Raw, data []byte) error { -// if err := store.CheckPutRaw(raw); err != nil { -// return err -// } -// -// path, err := ds.preparePath(raw.Bucket, raw.Key) -// if err != nil { -// return err -// } -// -// lock(path, raw.Offset, false) -// defer unLock(path, raw.Offset, false) -// -// f, err := fileutils.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) -// if err != nil { -// return err -// } -// defer f.Close() -// if raw.Length == 0 { -// if _, err := f.Write(data); err != nil { -// return err -// } -// return nil -// } -// -// if _, err := f.Write(data[:raw.Length]); err != nil { -// return err -// } -// return nil -//} - // Stat determines whether the file exists. 
-func (ds *diskStorage) Stat(ctx context.Context, raw *storedriver.Raw) (*storedriver.StorageInfo, error) { +func (ds *driver) Stat(raw *storedriver.Raw) (*storedriver.StorageInfo, error) { _, fileInfo, err := ds.statPath(raw.Bucket, raw.Key) if err != nil { return nil, err @@ -342,14 +291,14 @@ func (ds *diskStorage) Stat(ctx context.Context, raw *storedriver.Raw) (*storedr } // Exits if filepath exists, include symbol link -func (ds *diskStorage) Exits(ctx context.Context, raw *storedriver.Raw) bool { +func (ds *driver) Exits(raw *storedriver.Raw) bool { filePath := filepath.Join(ds.BaseDir, raw.Bucket, raw.Key) return fileutils.PathExist(filePath) } // Remove delete a file or dir. // It will force delete the file or dir when the raw.Trunc is true. -func (ds *diskStorage) Remove(ctx context.Context, raw *storedriver.Raw) error { +func (ds *driver) Remove(raw *storedriver.Raw) error { path, info, err := ds.statPath(raw.Bucket, raw.Key) if err != nil { return err @@ -369,14 +318,14 @@ func (ds *diskStorage) Remove(ctx context.Context, raw *storedriver.Raw) error { } // GetAvailSpace returns the available disk space in Byte. -func (ds *diskStorage) GetAvailSpace(ctx context.Context) (unit.Bytes, error) { +func (ds *driver) GetAvailSpace() (unit.Bytes, error) { path := ds.BaseDir lock(path, -1, true) defer unLock(path, -1, true) return fileutils.GetFreeSpace(path) } -func (ds *diskStorage) GetTotalAndFreeSpace(ctx context.Context) (unit.Bytes, unit.Bytes, error) { +func (ds *driver) GetTotalAndFreeSpace() (unit.Bytes, unit.Bytes, error) { path := ds.BaseDir lock(path, -1, true) defer unLock(path, -1, true) @@ -385,7 +334,7 @@ func (ds *diskStorage) GetTotalAndFreeSpace(ctx context.Context) (unit.Bytes, un // Walk walks the file tree rooted at root which determined by raw.Bucket and raw.Key, // calling walkFn for each file or directory in the tree, including root. 
-func (ds *diskStorage) Walk(ctx context.Context, raw *storedriver.Raw) error { +func (ds *driver) Walk(raw *storedriver.Raw) error { path, _, err := ds.statPath(raw.Bucket, raw.Key) if err != nil { return err @@ -397,14 +346,14 @@ func (ds *diskStorage) Walk(ctx context.Context, raw *storedriver.Raw) error { return filepath.Walk(path, raw.WalkFn) } -func (ds *diskStorage) GetPath(raw *storedriver.Raw) string { +func (ds *driver) GetPath(raw *storedriver.Raw) string { return filepath.Join(ds.BaseDir, raw.Bucket, raw.Key) } // helper function // preparePath gets the target path and creates the upper directory if it does not exist. -func (ds *diskStorage) preparePath(bucket, key string) (string, error) { +func (ds *driver) preparePath(bucket, key string) (string, error) { dir := filepath.Join(ds.BaseDir, bucket) if err := fileutils.MkdirAll(dir); err != nil { return "", err @@ -414,7 +363,7 @@ func (ds *diskStorage) preparePath(bucket, key string) (string, error) { } // statPath determines whether the target file exists and returns an fileMutex if so. 
-func (ds *diskStorage) statPath(bucket, key string) (string, os.FileInfo, error) { +func (ds *driver) statPath(bucket, key string) (string, os.FileInfo, error) { filePath := filepath.Join(ds.BaseDir, bucket, key) f, err := os.Stat(filePath) if err != nil { diff --git a/cdnsystem/storedriver/local/local_driver_test.go b/cdnsystem/storedriver/local/local_driver_test.go index 2556853e0..a6feb963d 100644 --- a/cdnsystem/storedriver/local/local_driver_test.go +++ b/cdnsystem/storedriver/local/local_driver_test.go @@ -22,7 +22,6 @@ import ( "io/ioutil" "os" "path/filepath" - "reflect" "strings" "sync" "testing" @@ -46,15 +45,8 @@ type StorageTestSuite struct { func (s *StorageTestSuite) SetupSuite() { s.workHome, _ = ioutil.TempDir("/tmp", "cdn-StoreTestSuite-repo") - store, err := NewStorage(map[string]interface{}{ - "baseDir": s.workHome, - //"gcConfig": map[string]interface{}{ - // "youngGCThreshold": "100G", - // "fullGCThreshold": "5G", - // "cleanRatio": 1, - // "intervalThreshold": "2h", - //}, - }) + store, err := NewStorageDriver(&storedriver.Config{ + BaseDir: "/tmp/download"}) s.Nil(err) s.NotNil(store) s.Driver = store @@ -176,11 +168,11 @@ func (s *StorageTestSuite) TestGetPutBytes() { for _, v := range cases { s.Run(v.name, func() { // put - err := s.PutBytes(context.Background(), v.putRaw, v.data) + err := s.PutBytes(v.putRaw, v.data) s.Nil(err) // get - result, err := s.GetBytes(context.Background(), v.getRaw) + result, err := s.GetBytes(v.getRaw) s.True(v.getErrCheck(err)) s.Equal(v.expected, string(result)) // stat @@ -274,10 +266,10 @@ func (s *StorageTestSuite) TestGetPut() { for _, v := range cases { s.Run(v.name, func() { // put - err := s.Put(context.Background(), v.putRaw, v.data) + err := s.Put(v.putRaw, v.data) s.Nil(err) // get - r, err := s.Get(context.Background(), v.getRaw) + r, err := s.Get(v.getRaw) s.True(v.getErrCheck(err)) if err == nil { result, err := ioutil.ReadAll(r) @@ -399,12 +391,12 @@ func (s *StorageTestSuite) 
TestAppendBytes() { for _, v := range cases { s.Run(v.name, func() { // put - err := s.Put(context.Background(), v.putRaw, v.data) + err := s.Put(v.putRaw, v.data) s.Nil(err) - err = s.Put(context.Background(), v.appendRaw, v.appData) + err = s.Put(v.appendRaw, v.appData) s.Nil(err) // get - r, err := s.Get(context.Background(), v.getRaw) + r, err := s.Get(v.getRaw) s.True(v.getErrCheck(err)) if err == nil { result, err := ioutil.ReadAll(r) @@ -475,13 +467,13 @@ func (s *StorageTestSuite) TestPutTrunc() { } for _, v := range cases { - err := s.Put(context.Background(), originRaw, strings.NewReader(originData)) + err := s.Put(originRaw, strings.NewReader(originData)) s.Nil(err) - err = s.Put(context.Background(), v.truncRaw, v.data) + err = s.Put(v.truncRaw, v.data) s.Nil(err) - r, err := s.Get(context.Background(), &storedriver.Raw{ + r, err := s.Get(&storedriver.Raw{ Key: "fooTrunc.meta", }) s.Nil(err) @@ -505,7 +497,7 @@ func (s *StorageTestSuite) TestPutParallel() { wg.Add(1) go func(i int) { defer wg.Done() - s.Put(context.TODO(), &storedriver.Raw{ + s.Put(&storedriver.Raw{ Key: key, Offset: int64(i) * int64(testStrLength), }, strings.NewReader(testStr)) @@ -513,7 +505,7 @@ func (s *StorageTestSuite) TestPutParallel() { } wg.Wait() - info, err := s.Stat(context.TODO(), &storedriver.Raw{Key: key}) + info, err := s.Stat(&storedriver.Raw{Key: key}) s.Nil(err) s.Equal(info.Size, int64(routineCount)*int64(testStrLength)) } @@ -523,7 +515,6 @@ func (s *StorageTestSuite) TestRemove() { BaseDir string } type args struct { - ctx context.Context raw *storedriver.Raw } tests := []struct { @@ -535,7 +526,7 @@ func (s *StorageTestSuite) TestRemove() { {}, } for _, tt := range tests { - err := s.Remove(tt.args.ctx, tt.args.raw) + err := s.Remove(tt.args.raw) s.Equal(err != nil, tt.wantErr) } } @@ -545,7 +536,6 @@ func (s *StorageTestSuite) TestStat() { BaseDir string } type args struct { - ctx context.Context raw *storedriver.Raw } tests := []struct { @@ -558,7 +548,7 @@ 
func (s *StorageTestSuite) TestStat() { {}, } for _, tt := range tests { - got, err := s.Stat(tt.args.ctx, tt.args.raw) + got, err := s.Stat(tt.args.raw) s.Equal(err, tt.wantErr) s.EqualValues(got, tt.want) } @@ -566,8 +556,7 @@ func (s *StorageTestSuite) TestStat() { func Test_diskStorage_CreateBaseDir(t *testing.T) { type fields struct { - BaseDir string - GcConfig *storedriver.GcConfig + BaseDir string } type args struct { ctx context.Context @@ -582,11 +571,10 @@ func Test_diskStorage_CreateBaseDir(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ds := &diskStorage{ - BaseDir: tt.fields.BaseDir, - GcConfig: tt.fields.GcConfig, + ds := &driver{ + BaseDir: tt.fields.BaseDir, } - if err := ds.CreateBaseDir(tt.args.ctx); (err != nil) != tt.wantErr { + if err := ds.CreateBaseDir(); (err != nil) != tt.wantErr { t.Errorf("CreateBaseDir() error = %v, wantErr %v", err, tt.wantErr) } }) @@ -595,8 +583,7 @@ func Test_diskStorage_CreateBaseDir(t *testing.T) { func Test_diskStorage_Exits(t *testing.T) { type fields struct { - BaseDir string - GcConfig *storedriver.GcConfig + BaseDir string } type args struct { ctx context.Context @@ -612,50 +599,19 @@ func Test_diskStorage_Exits(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ds := &diskStorage{ - BaseDir: tt.fields.BaseDir, - GcConfig: tt.fields.GcConfig, + ds := &driver{ + BaseDir: tt.fields.BaseDir, } - if got := ds.Exits(tt.args.ctx, tt.args.raw); got != tt.want { + if got := ds.Exits(tt.args.raw); got != tt.want { t.Errorf("Exits() = %v, want %v", got, tt.want) } }) } } -func Test_diskStorage_GetGcConfig(t *testing.T) { - type fields struct { - BaseDir string - GcConfig *storedriver.GcConfig - } - type args struct { - ctx context.Context - } - tests := []struct { - name string - fields fields - args args - want *storedriver.GcConfig - }{ - // TODO: Add test cases. 
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ds := &diskStorage{ - BaseDir: tt.fields.BaseDir, - GcConfig: tt.fields.GcConfig, - } - if got := ds.GetGcConfig(tt.args.ctx); !reflect.DeepEqual(got, tt.want) { - t.Errorf("GetGcConfig() = %v, want %v", got, tt.want) - } - }) - } -} - func Test_diskStorage_GetHomePath(t *testing.T) { type fields struct { - BaseDir string - GcConfig *storedriver.GcConfig + BaseDir string } type args struct { ctx context.Context @@ -670,11 +626,10 @@ func Test_diskStorage_GetHomePath(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ds := &diskStorage{ - BaseDir: tt.fields.BaseDir, - GcConfig: tt.fields.GcConfig, + ds := &driver{ + BaseDir: tt.fields.BaseDir, } - if got := ds.GetHomePath(tt.args.ctx); got != tt.want { + if got := ds.GetHomePath(); got != tt.want { t.Errorf("GetHomePath() = %v, want %v", got, tt.want) } }) @@ -683,8 +638,7 @@ func Test_diskStorage_GetHomePath(t *testing.T) { func Test_diskStorage_GetPath(t *testing.T) { type fields struct { - BaseDir string - GcConfig *storedriver.GcConfig + BaseDir string } type args struct { raw *storedriver.Raw @@ -699,9 +653,8 @@ func Test_diskStorage_GetPath(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ds := &diskStorage{ - BaseDir: tt.fields.BaseDir, - GcConfig: tt.fields.GcConfig, + ds := &driver{ + BaseDir: tt.fields.BaseDir, } if got := ds.GetPath(tt.args.raw); got != tt.want { t.Errorf("GetPath() = %v, want %v", got, tt.want) @@ -712,8 +665,7 @@ func Test_diskStorage_GetPath(t *testing.T) { func Test_diskStorage_GetTotalAndFreeSpace(t *testing.T) { type fields struct { - BaseDir string - GcConfig *storedriver.GcConfig + BaseDir string } type args struct { ctx context.Context @@ -730,11 +682,10 @@ func Test_diskStorage_GetTotalAndFreeSpace(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ds := &diskStorage{ - BaseDir: tt.fields.BaseDir, - GcConfig: 
tt.fields.GcConfig, + ds := &driver{ + BaseDir: tt.fields.BaseDir, } - got, got1, err := ds.GetTotalAndFreeSpace(tt.args.ctx) + got, got1, err := ds.GetTotalAndFreeSpace() if (err != nil) != tt.wantErr { t.Errorf("GetTotalAndFreeSpace() error = %v, wantErr %v", err, tt.wantErr) return @@ -751,8 +702,7 @@ func Test_diskStorage_GetTotalAndFreeSpace(t *testing.T) { func Test_diskStorage_GetTotalSpace(t *testing.T) { type fields struct { - BaseDir string - GcConfig *storedriver.GcConfig + BaseDir string } type args struct { ctx context.Context @@ -768,11 +718,10 @@ func Test_diskStorage_GetTotalSpace(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ds := &diskStorage{ - BaseDir: tt.fields.BaseDir, - GcConfig: tt.fields.GcConfig, + ds := &driver{ + BaseDir: tt.fields.BaseDir, } - got, err := ds.GetTotalSpace(tt.args.ctx) + got, err := ds.GetTotalSpace() if (err != nil) != tt.wantErr { t.Errorf("GetTotalSpace() error = %v, wantErr %v", err, tt.wantErr) return @@ -790,7 +739,7 @@ func (s *StorageTestSuite) BenchmarkPutParallel() { wg.Add(1) go func(i int) { defer wg.Done() - s.Put(context.Background(), &storedriver.Raw{ + s.Put(&storedriver.Raw{ Key: "foo.bech", Offset: int64(i) * 5, }, strings.NewReader("hello")) @@ -801,7 +750,7 @@ func (s *StorageTestSuite) BenchmarkPutParallel() { func (s *StorageTestSuite) BenchmarkPutSerial() { for k := 0; k < 1000; k++ { - s.Put(context.Background(), &storedriver.Raw{ + s.Put(&storedriver.Raw{ Key: "foo1.bech", Offset: int64(k) * 5, }, strings.NewReader("hello")) @@ -812,7 +761,7 @@ func (s *StorageTestSuite) BenchmarkPutSerial() { // helper function func (s *StorageTestSuite) checkStat(raw *storedriver.Raw) { - info, err := s.Stat(context.Background(), raw) + info, err := s.Stat(raw) s.Equal(isNilError(err), true) pathTemp := filepath.Join(s.workHome, raw.Bucket, raw.Key) @@ -827,10 +776,10 @@ func (s *StorageTestSuite) checkStat(raw *storedriver.Raw) { } func (s *StorageTestSuite) checkRemove(raw 
*storedriver.Raw) { - err := s.Remove(context.Background(), raw) + err := s.Remove(raw) s.Equal(isNilError(err), true) - _, err = s.Stat(context.Background(), raw) + _, err = s.Stat(raw) s.Equal(cdnerrors.IsFileNotExist(err), true) } diff --git a/cdnsystem/storedriver/store.go b/cdnsystem/storedriver/store.go deleted file mode 100644 index 4d2ae7849..000000000 --- a/cdnsystem/storedriver/store.go +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package storedriver - -import ( - "context" - - "fmt" - "io" - - "d7y.io/dragonfly/v2/cdnsystem/cdnerrors" - "d7y.io/dragonfly/v2/cdnsystem/config" - "d7y.io/dragonfly/v2/pkg/unit" - "d7y.io/dragonfly/v2/pkg/util/stringutils" - "github.com/pkg/errors" -) - -func init() { - // Ensure that storage implements the StorageDriver interface - var storage *Store = nil - var _ Driver = storage -} - -// Store is a wrapper of the storage which implements the interface of StorageDriver. -type Store struct { - // name is a unique identifier, you can also name it ID. - driverName string - // config is used to init storage driver. - config interface{} - // driver holds a storage which implements the interface of StorageDriver. - driver Driver -} - -// NewStore creates a new Store instance. 
-func NewStore(name string, builder StorageBuilder, cfg interface{}) (*Store, error) { - if name == "" || builder == nil { - return nil, fmt.Errorf("plugin name or builder cannot be nil") - } - - // init driver with specific config - driver, err := builder(cfg) - if err != nil { - return nil, fmt.Errorf("failed to init storage driver %s: %v", name, err) - } - - return &Store{ - driverName: name, - config: cfg, - driver: driver, - }, nil -} - -// Type returns the plugin type: StoragePlugin. -func (s *Store) Type() config.PluginType { - return config.StoragePlugin -} - -// Name returns the plugin name. -func (s *Store) Name() string { - return s.driverName -} - -// GetTotalSpace -func (s *Store) GetTotalSpace(ctx context.Context) (unit.Bytes, error) { - return s.driver.GetTotalSpace(ctx) -} - -// CreateBaseDir -func (s *Store) CreateBaseDir(ctx context.Context) error { - return s.driver.CreateBaseDir(ctx) -} - -func (s *Store) Exits(ctx context.Context, raw *Raw) bool { - return s.driver.Exits(ctx, raw) -} - -func (s *Store) GetTotalAndFreeSpace(ctx context.Context) (unit.Bytes, unit.Bytes, error) { - return s.driver.GetTotalAndFreeSpace(ctx) -} - -// Get the data from the storage driver in io stream. -func (s *Store) Get(ctx context.Context, raw *Raw) (io.ReadCloser, error) { - if err := checkEmptyKey(raw); err != nil { - return nil, err - } - return s.driver.Get(ctx, raw) -} - -// GetBytes gets the data from the storage driver in bytes. -func (s *Store) GetBytes(ctx context.Context, raw *Raw) ([]byte, error) { - if err := checkEmptyKey(raw); err != nil { - return nil, err - } - return s.driver.GetBytes(ctx, raw) -} - -// Put puts data into the storage in io stream. -func (s *Store) Put(ctx context.Context, raw *Raw, data io.Reader) error { - if err := checkEmptyKey(raw); err != nil { - return err - } - return s.driver.Put(ctx, raw, data) -} - -// PutBytes puts data into the storage in bytes. 
-func (s *Store) PutBytes(ctx context.Context, raw *Raw, data []byte) error { - if err := checkEmptyKey(raw); err != nil { - return err - } - return s.driver.PutBytes(ctx, raw, data) -} - -// AppendBytes append data into storage in bytes. -//func (s *Store) AppendBytes(ctx context.Context, raw *Raw, data []byte) error { -// if err := checkEmptyKey(raw); err != nil { -// return err -// } -// return s.driver.AppendBytes(ctx, raw, data) -//} - -// Remove the data from the storage based on raw information. -func (s *Store) Remove(ctx context.Context, raw *Raw) error { - if raw == nil || (stringutils.IsBlank(raw.Key) && - stringutils.IsBlank(raw.Bucket)) { - return errors.Wrapf(cdnerrors.ErrInvalidValue, "cannot set both key and bucket empty at the same time") - } - return s.driver.Remove(ctx, raw) -} - -// Stat determines whether the data exists based on raw information. -// If that, and return some info that in the form of struct StorageInfo. -// If not, return the ErrNotFound. -func (s *Store) Stat(ctx context.Context, raw *Raw) (*StorageInfo, error) { - if err := checkEmptyKey(raw); err != nil { - return nil, err - } - return s.driver.Stat(ctx, raw) -} - -// Walk walks the file tree rooted at root which determined by raw.Bucket and raw.Key, -// calling walkFn for each file or directory in the tree, including root. -func (s *Store) Walk(ctx context.Context, raw *Raw) error { - return s.driver.Walk(ctx, raw) -} - -func (s *Store) GetPath(raw *Raw) string { - return s.driver.GetPath(raw) -} - -func (s *Store) MoveFile(src string, dst string) error { - return s.driver.MoveFile(src, dst) -} - -// GetAvailSpace returns the available disk space in B. 
-func (s *Store) GetAvailSpace(ctx context.Context) (unit.Bytes, error) { - return s.driver.GetAvailSpace(ctx) -} - -func (s *Store) GetHomePath(ctx context.Context) string { - return s.driver.GetHomePath(ctx) -} - -func (s *Store) GetGcConfig(ctx context.Context) *GcConfig { - return s.driver.GetGcConfig(ctx) -} - -func checkEmptyKey(raw *Raw) error { - if raw == nil || stringutils.IsBlank(raw.Key) { - return errors.Wrapf(cdnerrors.ErrInvalidValue, "raw key is empty") - } - return nil -} diff --git a/cdnsystem/storedriver/store_mgr.go b/cdnsystem/storedriver/store_mgr.go index 8d766bd3f..94848516b 100644 --- a/cdnsystem/storedriver/store_mgr.go +++ b/cdnsystem/storedriver/store_mgr.go @@ -18,34 +18,48 @@ package storedriver import ( "fmt" + "path/filepath" "strings" - "d7y.io/dragonfly/v2/cdnsystem/config" "d7y.io/dragonfly/v2/cdnsystem/plugins" + "d7y.io/dragonfly/v2/pkg/util/fileutils" + "github.com/mitchellh/mapstructure" ) -// StorageBuilder is a function that creates a new storage plugin instant with the giving conf. -type StorageBuilder func(conf interface{}) (Driver, error) +// DriverBuilder is a function that creates a new storage driver plugin instant with the giving Config. +type DriverBuilder func(cfg *Config) (Driver, error) // Register defines an interface to register a driver with specified name. // All drivers should call this function to register itself to the driverFactory. 
-func Register(name string, builder StorageBuilder) { +func Register(name string, builder DriverBuilder) { name = strings.ToLower(name) // plugin builder - var f plugins.Builder = func(conf interface{}) (plugin plugins.Plugin, e error) { - return NewStore(name, builder, conf) + var f = func(conf interface{}) (plugins.Plugin, error) { + cfg := &Config{} + if err := mapstructure.Decode(conf, cfg); err != nil { + return nil, fmt.Errorf("failed to parse config: %v", err) + } + // prepare the base dir + if !filepath.IsAbs(cfg.BaseDir) { + return nil, fmt.Errorf("not absolute path: %s", cfg.BaseDir) + } + if err := fileutils.MkdirAll(cfg.BaseDir); err != nil { + return nil, fmt.Errorf("failed to create baseDir%s: %v", cfg.BaseDir, err) + } + + return newDriverPlugin(name, builder, cfg) } - plugins.RegisterPlugin(config.StoragePlugin, name, f) + plugins.RegisterPluginBuilder(plugins.StorageDriverPlugin, name, f) } // Get a store from manager with specified name. -func Get(name string) (*Store, error) { - v := plugins.GetPlugin(config.StoragePlugin, strings.ToLower(name)) +func Get(name string) (Driver, error) { + v := plugins.GetPlugin(plugins.StorageDriverPlugin, strings.ToLower(name)) if v == nil { return nil, fmt.Errorf("storage: %s not existed", name) } - if store, ok := v.(*Store); ok { - return store, nil + if plugin, ok := v.(*driverPlugin); ok { + return plugin.instance, nil } - return nil, fmt.Errorf("get store error: unknown reason") + return nil, fmt.Errorf("get store driver %s error: unknown reason", name) } diff --git a/cdnsystem/storedriver/store_mgr_test.go b/cdnsystem/storedriver/store_mgr_test.go index 0c6a53897..a7e47d948 100644 --- a/cdnsystem/storedriver/store_mgr_test.go +++ b/cdnsystem/storedriver/store_mgr_test.go @@ -33,7 +33,7 @@ type StoreMgrTestSuite struct { func (s *StoreMgrTestSuite) SetupSuite() { type args struct { name string - builder StorageBuilder + builder DriverBuilder } tests := []struct { name string @@ -43,7 +43,7 @@ func (s 
*StoreMgrTestSuite) SetupSuite() { name: "test1", args: args{ name: "disk1", - builder: func(conf interface{}) (Driver, error) { + builder: func(cfg *Config) (Driver, error) { return nil, nil }, }, @@ -51,7 +51,7 @@ func (s *StoreMgrTestSuite) SetupSuite() { name: "test2", args: args{ name: "memory1", - builder: func(conf interface{}) (Driver, error) { + builder: func(cfg *Config) (Driver, error) { return nil, nil }, }, @@ -69,16 +69,15 @@ func (s *StoreMgrTestSuite) TestGet() { tests := []struct { name string args args - want *Store + want Driver wantErr bool }{ { name: "test1", args: args{name: "disk"}, - want: &Store{ - driverName: "disk", - config: nil, - driver: nil, + want: &driverPlugin{ + name: "disk", + instance: nil, }, wantErr: false, }, diff --git a/client/daemon/peer/peertask_file.go b/client/daemon/peer/peertask_file.go index 1ff537110..7734b0393 100644 --- a/client/daemon/peer/peertask_file.go +++ b/client/daemon/peer/peertask_file.go @@ -21,6 +21,7 @@ import ( "sync" "sync/atomic" + "d7y.io/dragonfly/v2/pkg/dferrors" "github.com/pkg/errors" "go.opentelemetry.io/otel/semconv" "go.opentelemetry.io/otel/trace" @@ -28,7 +29,6 @@ import ( "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/pkg/dfcodes" - "d7y.io/dragonfly/v2/pkg/dferrors" logger "d7y.io/dragonfly/v2/pkg/dflog" "d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/rpc/scheduler" diff --git a/client/daemon/peer/peertask_stream.go b/client/daemon/peer/peertask_stream.go index fa0cfdc2a..9bb57048e 100644 --- a/client/daemon/peer/peertask_stream.go +++ b/client/daemon/peer/peertask_stream.go @@ -23,6 +23,7 @@ import ( "sync" "sync/atomic" + "d7y.io/dragonfly/v2/pkg/dferrors" "github.com/go-http-utils/headers" "github.com/pkg/errors" "go.opentelemetry.io/otel/semconv" @@ -32,7 +33,6 @@ import ( "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/pkg/dfcodes" - "d7y.io/dragonfly/v2/pkg/dferrors" logger "d7y.io/dragonfly/v2/pkg/dflog" 
"d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/rpc/scheduler" diff --git a/cmd/dependency/dependency.go b/cmd/dependency/dependency.go index 0fdf4e751..e609bbb0d 100644 --- a/cmd/dependency/dependency.go +++ b/cmd/dependency/dependency.go @@ -183,7 +183,6 @@ func initConfig(useConfigFile bool, name string, config interface{}) { } } } - if err := viper.Unmarshal(config, initDecoderConfig); err != nil { panic(errors.Wrap(err, "unmarshal config to struct")) } diff --git a/docs/en/config/cdn.yaml b/docs/en/config/cdn.yaml index e413dc5e1..b473fe820 100644 --- a/docs/en/config/cdn.yaml +++ b/docs/en/config/cdn.yaml @@ -32,38 +32,56 @@ base: # default: 2m0s gcMetaInterval: 2m - # gcStorageInterval is the interval time to execute GC storage. - # default: 15s - gcStorageInterval: 15s - # TaskExpireTime when a task is not accessed within the taskExpireTime, # and it will be treated to be expired. # default: 3m0s taskExpireTime: 3m - # StoragePattern is the pattern of storage policy, [disk/hybrid] - storagePattern: disk + # storageMode is the Mode of storage policy, [disk/hybrid] + storageMode: disk plugins: - storage: + storageDriver: - name: disk enable: true config: baseDir: /tmp/cdnsystem2 - gcConfig: - youngGCThreshold: 100G - fullGCThreshold: 5G - cleanRatio: 1 - intervalThreshold: 2h - name: memory enable: true config: baseDir: /tmp/memory/dragonfly - gcConfig: - youngGCThreshold: 100G - fullGCThreshold: 5G - cleanRatio: 3 - intervalThreshold: 2h + + storageManager: + - name: disk + enable: true + config: + gcInitialDelay: 5s + gcInterval: 15s + driverConfigs: + disk: + gcConfig: + youngGCThreshold: 100G + fullGCThreshold: 5G + cleanRatio: 1 + intervalThreshold: 2h + - name: hybrid + enable: false + config: + gcInitialDelay: 5s + gcInterval: 15s + driverConfigs: + disk: + gcConfig: + youngGCThreshold: 100G + fullGCThreshold: 5G + cleanRatio: 1 + intervalThreshold: 2h + memory: + gcConfig: + youngGCThreshold: 100G + fullGCThreshold: 5G + cleanRatio: 3 + 
intervalThreshold: 2h # Console shows log on console # default: false diff --git a/pkg/util/digestutils/digest.go b/pkg/util/digestutils/digest.go index 4a8119c35..8dbf12880 100644 --- a/pkg/util/digestutils/digest.go +++ b/pkg/util/digestutils/digest.go @@ -23,6 +23,7 @@ import ( "encoding/hex" "hash" "io" + "os" "d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/util/fileutils" @@ -63,7 +64,7 @@ func Md5File(name string) string { return "" } - f, err := fileutils.Open(name) + f, err := os.Open(name) if err != nil { return "" } diff --git a/pkg/util/fileutils/file_utils.go b/pkg/util/fileutils/file_utils.go index 0071533b6..04b41fb38 100644 --- a/pkg/util/fileutils/file_utils.go +++ b/pkg/util/fileutils/file_utils.go @@ -30,9 +30,16 @@ import ( "github.com/pkg/errors" ) +const ( + // PrivateFileMode grants owner to read/write a file. + PrivateFileMode = 0600 + // PrivateDirMode means read and execute access for everyone and also write access for the owner of the directory. + PrivateDirMode = 0755 +) + // MkdirAll creates a directory named path with 0755 perm. func MkdirAll(dir string) error { - return os.MkdirAll(dir, 0755) + return os.MkdirAll(dir, PrivateDirMode) } // DeleteFile deletes a regular file not a directory. @@ -49,7 +56,7 @@ func DeleteFile(path string) error { } // OpenFile opens a file. If the parent directory of the file isn't exist, -// it will create the directory. +// it will create the directory with 0755 perm. func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) { if !PathExist(path) && (flag&syscall.O_CREAT != 0) { if err := MkdirAll(filepath.Dir(path)); err != nil { @@ -60,10 +67,6 @@ func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) { return os.OpenFile(path, flag, perm) } -func Open(path string) (*os.File, error) { - return OpenFile(path, syscall.O_RDONLY, 0) -} - // Link creates a hard link pointing to oldname named newname for a file. 
func Link(oldname string, newname string) error { if PathExist(newname) {