support ETag for providers
wwqgtxx committed Sep 22, 2024
1 parent 028634a commit 6cb0830
Showing 7 changed files with 201 additions and 105 deletions.
8 changes: 2 additions & 6 deletions adapter/provider/provider.go
@@ -56,20 +56,16 @@ func (pp *proxySetProvider) HealthCheck() {
}

func (pp *proxySetProvider) Update() error {
elm, same, err := pp.Fetcher.Update()
if err == nil && !same {
pp.OnUpdate(elm)
}
_, _, err := pp.Fetcher.Update()
return err
}

func (pp *proxySetProvider) Initial() error {
elm, err := pp.Fetcher.Initial()
_, err := pp.Fetcher.Initial()
if err != nil {
return err
}

pp.OnUpdate(elm)
return nil
}

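Note on this change: Update and Initial no longer call pp.OnUpdate themselves because the callback has moved into the fetcher. NewFetcher now stores the handler in the unexported onUpdate field and loadBuf invokes it whenever the fetched content actually changes (see fetcher.go below). The following is a minimal sketch of that inversion; the provider constructor is not among the lines shown here, so newProxySetProvider and refresh are illustrative names only.

// Illustrative sketch only: proxySetProvider's real constructor is not shown in
// this diff, and the names newProxySetProvider/refresh are invented.
package main

import "fmt"

// fetcher mimics resource.Fetcher[V]: it owns the onUpdate callback and calls it
// itself when new content arrives, so callers only need Update/Initial.
type fetcher[V any] struct {
	onUpdate func(V)
}

func (f *fetcher[V]) refresh(contents V) {
	if f.onUpdate != nil {
		f.onUpdate(contents) // the fetcher, not the provider, triggers the handler
	}
}

type proxySetProvider struct {
	fetcher *fetcher[[]string]
}

func newProxySetProvider() *proxySetProvider {
	pp := &proxySetProvider{}
	pp.fetcher = &fetcher[[]string]{onUpdate: func(proxies []string) {
		fmt.Printf("provider refreshed with %d proxies\n", len(proxies))
	}}
	return pp
}

func main() {
	pp := newProxySetProvider()
	pp.fetcher.refresh([]string{"proxy-a", "proxy-b"})
}
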
55 changes: 55 additions & 0 deletions component/profile/cachefile/cache.go
@@ -1,6 +1,7 @@
package cachefile

import (
"math"
"os"
"sync"
"time"
@@ -19,6 +20,7 @@ var (

bucketSelected = []byte("selected")
bucketFakeip = []byte("fakeip")
bucketETag = []byte("etag")
)

// CacheFile store and update the cache file
@@ -143,6 +145,59 @@ func (c *CacheFile) FlushFakeIP() error {
return err
}

func (c *CacheFile) SetETagWithHash(url string, hash []byte, etag string) {
if c.DB == nil {
return
}

lenHash := len(hash)
if lenHash > math.MaxUint8 {
return // maybe panic is better
}

data := make([]byte, 1, 1+lenHash+len(etag))
data[0] = uint8(lenHash)
data = append(data, hash...)
data = append(data, etag...)

err := c.DB.Batch(func(t *bbolt.Tx) error {
bucket, err := t.CreateBucketIfNotExists(bucketETag)
if err != nil {
return err
}

return bucket.Put([]byte(url), data)
})
if err != nil {
log.Warnln("[CacheFile] write cache to %s failed: %s", c.DB.Path(), err.Error())
return
}
}
func (c *CacheFile) GetETagWithHash(key string) (hash []byte, etag string) {
if c.DB == nil {
return
}
var value []byte
c.DB.View(func(t *bbolt.Tx) error {
if bucket := t.Bucket(bucketETag); bucket != nil {
if v := bucket.Get([]byte(key)); v != nil {
value = v
}
}
return nil
})
if len(value) == 0 {
return
}
lenHash := int(value[0])
if len(value) < 1+lenHash {
return
}
hash = value[1 : 1+lenHash]
etag = string(value[1+lenHash:])
return
}

func (c *CacheFile) Close() error {
return c.DB.Close()
}
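The cached ETag record packs a one-byte hash length, the hash bytes, and the ETag string into a single bbolt value, which is why GetETagWithHash refuses any value shorter than 1+lenHash. Below is a standalone sketch of that [length][hash][etag] layout, assuming the same encoding as SetETagWithHash/GetETagWithHash but with bbolt left out.

package main

import (
	"errors"
	"fmt"
	"math"
)

// encodeETagRecord mirrors SetETagWithHash: [1-byte hash length][hash bytes][etag bytes].
func encodeETagRecord(hash []byte, etag string) ([]byte, error) {
	if len(hash) > math.MaxUint8 {
		return nil, errors.New("hash too long for a 1-byte length prefix")
	}
	data := make([]byte, 1, 1+len(hash)+len(etag))
	data[0] = uint8(len(hash))
	data = append(data, hash...)
	data = append(data, etag...)
	return data, nil
}

// decodeETagRecord mirrors GetETagWithHash: bail out if the value is too short.
func decodeETagRecord(value []byte) (hash []byte, etag string) {
	if len(value) == 0 {
		return
	}
	lenHash := int(value[0])
	if len(value) < 1+lenHash {
		return
	}
	return value[1 : 1+lenHash], string(value[1+lenHash:])
}

func main() {
	rec, _ := encodeETagRecord([]byte{0xde, 0xad}, `W/"abc123"`)
	h, e := decodeETagRecord(rec)
	fmt.Printf("hash=%x etag=%s\n", h, e)
}
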
145 changes: 70 additions & 75 deletions component/resource/fetcher.go
@@ -1,9 +1,7 @@
package resource

import (
"bytes"
"context"
"crypto/md5"
"os"
"path/filepath"
"time"
@@ -29,10 +27,10 @@ type Fetcher[V any] struct {
name string
vehicle types.Vehicle
updatedAt time.Time
hash [16]byte
hash types.HashType
parser Parser[V]
interval time.Duration
OnUpdate func(V)
onUpdate func(V)
watcher *fswatch.Watcher
}

@@ -54,92 +52,63 @@ func (f *Fetcher[V]) UpdatedAt() time.Time {

func (f *Fetcher[V]) Initial() (V, error) {
var (
buf []byte
err error
isLocal bool
forceUpdate bool
buf []byte
contents V
err error
)

if stat, fErr := os.Stat(f.vehicle.Path()); fErr == nil {
// local file exists, use it first
buf, err = os.ReadFile(f.vehicle.Path())
modTime := stat.ModTime()
f.updatedAt = modTime
isLocal = true
if time.Since(modTime) > f.interval {
forceUpdate = true
contents, _, err = f.loadBuf(buf, types.MakeHash(buf), false)
f.updatedAt = modTime // reset updatedAt to file's modTime

if err == nil {
err = f.startPullLoop(time.Since(modTime) > f.interval)
if err != nil {
return lo.Empty[V](), err
}
return contents, nil
}
} else {
buf, err = f.vehicle.Read(f.ctx)
f.updatedAt = time.Now()
}

// parse local file error, fallback to remote
contents, _, err = f.Update()

if err != nil {
return lo.Empty[V](), err
}

contents, err := f.parser(buf)
err = f.startPullLoop(false)
if err != nil {
if !isLocal {
return lo.Empty[V](), err
}

// parse local file error, fallback to remote
buf, err = f.vehicle.Read(f.ctx)
if err != nil {
return lo.Empty[V](), err
}

contents, err = f.parser(buf)
if err != nil {
return lo.Empty[V](), err
}

isLocal = false
}

if f.vehicle.Type() != types.File && !isLocal {
if err := safeWrite(f.vehicle.Path(), buf); err != nil {
return lo.Empty[V](), err
}
}

f.hash = md5.Sum(buf)

// pull contents automatically
if f.vehicle.Type() == types.File {
f.watcher, err = fswatch.NewWatcher(fswatch.Options{
Path: []string{f.vehicle.Path()},
Direct: true,
Callback: f.update,
})
if err != nil {
return lo.Empty[V](), err
}
err = f.watcher.Start()
if err != nil {
return lo.Empty[V](), err
}
} else if f.interval > 0 {
go f.pullLoop(forceUpdate)
return lo.Empty[V](), err
}

return contents, nil
}

func (f *Fetcher[V]) Update() (V, bool, error) {
buf, err := f.vehicle.Read(f.ctx)
buf, hash, err := f.vehicle.Read(f.ctx, f.hash)
if err != nil {
return lo.Empty[V](), false, err
}
return f.SideUpdate(buf)
return f.loadBuf(buf, hash, f.vehicle.Type() != types.File)
}

func (f *Fetcher[V]) SideUpdate(buf []byte) (V, bool, error) {
return f.loadBuf(buf, types.MakeHash(buf), true)
}

func (f *Fetcher[V]) loadBuf(buf []byte, hash types.HashType, updateFile bool) (V, bool, error) {
now := time.Now()
hash := md5.Sum(buf)
if bytes.Equal(f.hash[:], hash[:]) {
if f.hash.Equal(hash) {
if updateFile {
_ = os.Chtimes(f.vehicle.Path(), now, now)
}
f.updatedAt = now
_ = os.Chtimes(f.vehicle.Path(), now, now)
return lo.Empty[V](), true, nil
}

if buf == nil { // f.hash changed while f.vehicle.Read was in flight; should not happen (concurrent update)
return lo.Empty[V](), true, nil
}

@@ -148,15 +117,18 @@ func (f *Fetcher[V]) SideUpdate(buf []byte) (V, bool, error) {
return lo.Empty[V](), false, err
}

if f.vehicle.Type() != types.File {
if updateFile {
if err = safeWrite(f.vehicle.Path(), buf); err != nil {
return lo.Empty[V](), false, err
}
}

f.updatedAt = now
f.hash = hash

if f.onUpdate != nil {
f.onUpdate(contents)
}

return contents, false, nil
}

@@ -176,7 +148,7 @@ func (f *Fetcher[V]) pullLoop(forceUpdate bool) {

if forceUpdate {
log.Warnln("[Provider] %s not updated for a long time, force refresh", f.Name())
f.update(f.vehicle.Path())
f.updateWithLog()
}

timer := time.NewTimer(initialInterval)
@@ -185,15 +157,40 @@ func (f *Fetcher[V]) pullLoop(forceUpdate bool) {
select {
case <-timer.C:
timer.Reset(f.interval)
f.update(f.vehicle.Path())
f.updateWithLog()
case <-f.ctx.Done():
return
}
}
}

func (f *Fetcher[V]) update(path string) {
elm, same, err := f.Update()
func (f *Fetcher[V]) startPullLoop(forceUpdate bool) (err error) {
// pull contents automatically
if f.vehicle.Type() == types.File {
f.watcher, err = fswatch.NewWatcher(fswatch.Options{
Path: []string{f.vehicle.Path()},
Direct: true,
Callback: f.updateCallback,
})
if err != nil {
return err
}
err = f.watcher.Start()
if err != nil {
return err
}
} else if f.interval > 0 {
go f.pullLoop(forceUpdate)
}
return
}

func (f *Fetcher[V]) updateCallback(path string) {
f.updateWithLog()
}

func (f *Fetcher[V]) updateWithLog() {
_, same, err := f.Update()
if err != nil {
log.Errorln("[Provider] %s pull error: %s", f.Name(), err.Error())
return
@@ -205,9 +202,7 @@ func (f *Fetcher[V]) update(path string) {
}

log.Infoln("[Provider] %s's content update", f.Name())
if f.OnUpdate != nil {
f.OnUpdate(elm)
}
return
}

func safeWrite(path string, buf []byte) error {
@@ -230,7 +225,7 @@ func NewFetcher[V any](name string, interval time.Duration, vehicle types.Vehicl
name: name,
vehicle: vehicle,
parser: parser,
OnUpdate: onUpdate,
onUpdate: onUpdate,
interval: interval,
}
}
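Vehicle.Read now receives the previous hash and returns a hash along with the payload (see the new f.vehicle.Read(f.ctx, f.hash) call above), which lets an HTTP vehicle send the cached ETag as If-None-Match and hand back a nil buffer with the old hash on 304 Not Modified; the hash comparison in loadBuf then reports the content as unchanged. The vehicle-side code is in one of the changed files not rendered on this page, so the sketch below is only an assumption of how such a conditional request could look, built on the standard library.

package main

import (
	"fmt"
	"io"
	"net/http"
)

// conditionalFetch sketches an ETag-aware download: send the cached ETag as
// If-None-Match and report "not modified" instead of re-downloading the body.
// The cache lookup/storage (CacheFile.GetETagWithHash/SetETagWithHash in this
// commit) is replaced by plain parameters here.
func conditionalFetch(url, cachedETag string) (body []byte, newETag string, notModified bool, err error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, "", false, err
	}
	if cachedETag != "" {
		req.Header.Set("If-None-Match", cachedETag)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, "", false, err
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotModified {
		// Comparable to Read returning a nil buf with the old hash: loadBuf
		// then reports same=true and skips parsing entirely.
		return nil, cachedETag, true, nil
	}
	body, err = io.ReadAll(resp.Body)
	return body, resp.Header.Get("ETag"), false, err
}

func main() {
	body, etag, same, err := conditionalFetch("https://example.com/proxies.yaml", `W/"abc123"`)
	fmt.Println(len(body), etag, same, err)
}
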
