feat: back to source with piece group #2826

Merged
merged 9 commits on Dec 27, 2023
Changes from 1 commit
179 changes: 176 additions & 3 deletions client/daemon/peer/piece_manager.go
@@ -31,6 +31,7 @@
"sync"
"time"

mapset "github.com/deckarep/golang-set/v2"
"github.com/go-http-utils/headers"
"go.uber.org/atomic"
"golang.org/x/time/rate"
@@ -805,9 +806,97 @@
ctx, cancel := context.WithCancel(ctx)
defer cancel()

err := pm.concurrentDownloadSourceByPiece(ctx, pt, peerTaskRequest, parsedRange, continuePieceNum, pieceCount, con, pieceSize, cancel)
if err != nil {
return err
if continuePieceNum > 0 {
return pm.concurrentDownloadSourceByPiece(ctx, pt, peerTaskRequest, parsedRange, continuePieceNum, pieceCount, con, pieceSize, cancel)
}
return pm.concurrentDownloadSourceByPieceGroup(ctx, pt, peerTaskRequest, parsedRange, continuePieceNum, pieceCount, con, pieceSize, cancel)
}

type pieceGroup struct {
start, end int32
startByte, endByte int
}

func (pm *pieceManager) concurrentDownloadSourceByPieceGroup(
ctx context.Context, pt Task, peerTaskRequest *schedulerv1.PeerTaskRequest,
parsedRange *nethttp.Range, startPieceNum int32, pieceCount int32,
con int, pieceSize uint32, cancel context.CancelFunc) error {
// TODO
if startPieceNum > 0 {
return fmt.Errorf("concurrentDownloadSourceByPieceGroup not suport startPieceNum yet")

Check failure on line 826 in client/daemon/peer/piece_manager.go

View workflow job for this annotation

GitHub Actions / Lint

`suport` is a misspelling of `support` (misspell)
}
log := pt.Log()
var downloadError atomic.Value
downloadedPieces := mapset.NewSet[int32]()

wg := sync.WaitGroup{}
wg.Add(con)

minPieceCountPerGroup := pieceCount / int32(con)
remainderPieces := pieceCount % int32(con)

// piece group example:
// con = 4, pieceCount = 5:
// worker 0: 2 pieces
// worker 1: 1 piece
// worker 2: 1 piece
// worker 3: 1 piece
for i := int32(0); i < int32(con); i++ {
go func(i int32) {
var (
start int32
end int32
)
// calculate piece group first and last piece num
if i < reminderPieces {
start = i*minPieceCountPerGroup + i
end = i*minPieceCountPerGroup + minPieceCountPerGroup
} else {
start = i*minPieceCountPerGroup + remainderPieces
end = start + minPieceCountPerGroup - 1
}

// calculate piece group first and last range byte with parsedRange.Start
startByte := int(start)*int(pieceSize) + int(parsedRange.Start)
endByte := int(end+1)*int(pieceSize) - 1 + int(parsedRange.Start)
if endByte > int(parsedRange.Length)-1 {
endByte = int(parsedRange.Length) - 1
}

pg := &pieceGroup{
start: start,
end: end,
startByte: startByte,
endByte: endByte,
}

log.Infof("concurrent worker %d start to download piece %d-%d, byte %d-%d", i, start, end, startByte, endByte)
_, _, retryErr := retry.Run(ctx,
pm.concurrentOption.InitBackoff,
pm.concurrentOption.MaxBackoff,
pm.concurrentOption.MaxAttempts,
func() (data any, cancel bool, err error) {
err = pm.downloadPieceGroupFromSource(ctx, pt, log,
peerTaskRequest, pieceSize, pg, parsedRange.Length, pieceCount, downloadedPieces)
return nil, errors.Is(err, context.Canceled), err
})
if retryErr != nil {
// download piece error after many retry, cancel task
cancel()
downloadError.Store(&backSourceError{err: retryErr})
log.Infof("concurrent worker %d failed to download piece group after %d retries, last error: %s",
i, pm.concurrentOption.MaxAttempts, retryErr.Error())
}
wg.Done()
}(i)
}

wg.Wait()

// check error
if downloadError.Load() != nil {
return downloadError.Load().(*backSourceError).err
}

return nil
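
For readers checking the grouping arithmetic above, here is a minimal, self-contained sketch (not part of the diff; the helper name splitPieceGroups is illustrative only) of how pieceCount pieces are divided across con workers, with the first pieceCount%con workers taking one extra piece each:

package main

import "fmt"

// splitPieceGroups mirrors the grouping arithmetic in
// concurrentDownloadSourceByPieceGroup: pieces are divided into `con`
// contiguous groups, and the first pieceCount%con workers get one extra piece.
func splitPieceGroups(pieceCount, con int32) [][2]int32 {
	minPieceCountPerGroup := pieceCount / con
	remainderPieces := pieceCount % con
	groups := make([][2]int32, 0, con)
	for i := int32(0); i < con; i++ {
		var start, end int32
		if i < remainderPieces {
			start = i*minPieceCountPerGroup + i
			end = i*minPieceCountPerGroup + minPieceCountPerGroup
		} else {
			start = i*minPieceCountPerGroup + remainderPieces
			end = start + minPieceCountPerGroup - 1
		}
		groups = append(groups, [2]int32{start, end})
	}
	return groups
}

func main() {
	// con = 4, pieceCount = 5 prints [[0 1] [2 2] [3 3] [4 4]]:
	// worker 0 downloads pieces 0-1, workers 1-3 download one piece each.
	fmt.Println(splitPieceGroups(5, 4))
}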
@@ -965,3 +1054,87 @@
pt.PublishPieceInfo(pieceNum, uint32(result.Size))
return nil
}

func (pm *pieceManager) downloadPieceGroupFromSource(ctx context.Context,
pt Task, log *logger.SugaredLoggerOnWith,
peerTaskRequest *schedulerv1.PeerTaskRequest,
pieceSize uint32, pg *pieceGroup,
totalContentLength int64,
totalPieceCount int32,
downloadedPieces mapset.Set[int32]) error {

backSourceRequest, err := source.NewRequestWithContext(ctx, peerTaskRequest.Url, peerTaskRequest.UrlMeta.Header)
if err != nil {
log.Errorf("build piece %d-%d back source request error: %s", pg.start, pg.end, err)
return err
}

pieceGroupRange := fmt.Sprintf("%d-%d", pg.startByte, pg.endByte)
// FIXME refactor source package, normal Range header is enough
backSourceRequest.Header.Set(source.Range, pieceGroupRange)
backSourceRequest.Header.Set(headers.Range, "bytes="+pieceGroupRange)

log.Debugf("piece %d-%d back source header: %#v", pg.start, pg.end, backSourceRequest.Header)

response, err := source.Download(backSourceRequest)
if err != nil {
log.Errorf("piece %d-%d back source response error: %s", pg.start, pg.end, err)
return err
}
defer response.Body.Close()

err = response.Validate()
if err != nil {
log.Errorf("piece %d-%d back source response validate error: %s", pg.start, pg.end, err)
return err
}

log.Debugf("piece %d-%d back source response ok", pg.start, pg.end)

for i := pg.start; i <= pg.end; i++ {
pieceNum := i
offset := uint64(pg.startByte) + uint64(i-pg.start)*uint64(pieceSize)
size := pieceSize
// update last piece size
if offset+uint64(size)-1 > uint64(pg.endByte) {
size = uint32(uint64(pg.endByte) + 1 - offset)
}

result, md5, err := pm.processPieceFromSource(
pt, response.Body, totalContentLength, pieceNum, offset, size,
func(int64) (int32, int64, bool) {
downloadedPieces.Add(pieceNum)
return totalPieceCount, totalContentLength, downloadedPieces.Cardinality() == int(totalPieceCount)
})
request := &DownloadPieceRequest{
TaskID: pt.GetTaskID(),
PeerID: pt.GetPeerID(),
piece: &commonv1.PieceInfo{
PieceNum: pieceNum,
RangeStart: offset,
RangeSize: uint32(result.Size),
PieceMd5: md5,
PieceOffset: offset,
PieceStyle: 0,
},
}

if err != nil {
log.Errorf("download piece %d error: %s", pieceNum, err)
pt.ReportPieceResult(request, result, detectBackSourceError(err))
return err
}

if result.Size != int64(size) {
log.Errorf("download piece %d size not match, desired: %d, actual: %d", pieceNum, size, result.Size)
pt.ReportPieceResult(request, result, detectBackSourceError(storage.ErrShortRead))
return storage.ErrShortRead
}

pt.ReportPieceResult(request, result, nil)
pt.PublishPieceInfo(pieceNum, uint32(result.Size))

log.Debugf("piece %d done", pieceNum)
}
return nil
}
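
As a rough usage note (a sketch under assumptions, not part of the diff; pieceRanges is a hypothetical helper): each group is fetched with a single HTTP Range request ("bytes=startByte-endByte"), and the response body is then sliced into pieces, with offsets advancing by pieceSize from the group's startByte and only the final piece truncated to the group's endByte, matching the loop in downloadPieceGroupFromSource above:

package main

import "fmt"

// pieceRanges mirrors how downloadPieceGroupFromSource walks a single group's
// response body: piece offsets advance by pieceSize from the group's startByte,
// and the last piece is shortened so it ends exactly at endByte.
func pieceRanges(startPiece, endPiece int32, startByte, endByte int, pieceSize uint32) []string {
	var out []string
	for i := startPiece; i <= endPiece; i++ {
		offset := uint64(startByte) + uint64(i-startPiece)*uint64(pieceSize)
		size := pieceSize
		if offset+uint64(size)-1 > uint64(endByte) {
			size = uint32(uint64(endByte) + 1 - offset)
		}
		out = append(out, fmt.Sprintf("piece %d: offset %d, size %d", i, offset, size))
	}
	return out
}

func main() {
	// pieceSize = 1024, group of pieces 0-2 covering bytes 0-2559:
	// piece 0: offset 0, size 1024
	// piece 1: offset 1024, size 1024
	// piece 2: offset 2048, size 512
	for _, r := range pieceRanges(0, 2, 0, 2559, 1024) {
		fmt.Println(r)
	}
}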
4 changes: 3 additions & 1 deletion client/daemon/peer/piece_manager_test.go
@@ -481,7 +481,9 @@ func TestPieceManager_DownloadSource(t *testing.T) {

outputBytes, err := os.ReadFile(output)
assert.Nil(err, "load output file")
assert.Equal(testBytes, outputBytes, "output and desired output must match")
if string(testBytes) != string(outputBytes) {
assert.Equal(string(testBytes), string(outputBytes), "output and desired output must match")
}
})
}
}
2 changes: 1 addition & 1 deletion client/daemon/storage/local_storage.go
@@ -106,7 +106,7 @@ func (t *localTaskStore) WritePiece(ctx context.Context, req *WritePieceRequest)
t.RLock()
if piece, ok := t.Pieces[req.Num]; ok {
t.RUnlock()
t.Debugf("piece %d already exist,ignore writing piece", req.Num)
t.Debugf("piece %d already exist, ignore writing piece", req.Num)
// discard already downloaded data for back source
n, err = io.CopyN(io.Discard, req.Reader, piece.Range.Length)
if err != nil && err != io.EOF {
2 changes: 1 addition & 1 deletion client/daemon/storage/local_storage_subtask.go
@@ -50,7 +50,7 @@ func (t *localSubTaskStore) WritePiece(ctx context.Context, req *WritePieceReque
t.RLock()
if piece, ok := t.Pieces[req.Num]; ok {
t.RUnlock()
t.Debugf("piece %d already exist,ignore writing piece", req.Num)
t.Debugf("piece %d already exist, ignore writing piece", req.Num)
// discard already downloaded data for back source
n, err = io.CopyN(io.Discard, req.Reader, piece.Range.Length)
if err != nil && err != io.EOF {
1 change: 1 addition & 0 deletions go.mod
@@ -111,6 +111,7 @@ require (
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set/v2 v2.3.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
2 changes: 2 additions & 0 deletions go.sum
@@ -217,6 +217,8 @@ github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CL
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A=
github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/denisenkom/go-mssqldb v0.11.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=