diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 8836fe82..6df56c66 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -7,10 +7,7 @@ on:
     tags:
       - 'v*'
   pull_request:
-    types:
-      - opened
-      - reopened
-      - edited
+    types: [opened, reopened, edited, ready_for_review]
 
 jobs:
   build:
@@ -105,3 +102,16 @@ jobs:
           tags: master
           tag_with_sha: true
           always_pull: true
+
+      # docker build (branch)
+      - name: docker - build other
+        if: startsWith(github.ref, 'refs/heads/master') == false
+        uses: docker/build-push-action@v1
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+          repository: cloudb0x/autoscan
+          dockerfile: docker/Dockerfile
+          tag_with_ref: true
+          tag_with_sha: false
+          always_pull: true
\ No newline at end of file
diff --git a/.github/workflows/cleanup.yml b/.github/workflows/cleanup.yml
new file mode 100644
index 00000000..79e6f27e
--- /dev/null
+++ b/.github/workflows/cleanup.yml
@@ -0,0 +1,57 @@
+name: Docker Cleanup
+
+on:
+  pull_request:
+    types: [closed, removed]
+  delete:
+
+jobs:
+  cleanup_branch:
+    if: startsWith(github.event.ref_type, 'branch') == true
+    runs-on: ubuntu-latest
+    steps:
+      - name: Determine docker tag
+        env:
+          GH_EVENT_REF: ${{ github.event.ref }}
+        id: dockertag
+        shell: bash
+        run: echo "::set-output name=tag::${GH_EVENT_REF#refs/heads/}"
+
+      - name: Remove branch docker tag
+        shell: bash
+        env:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+          tag: ${{ steps.dockertag.outputs.tag }}
+        run: |
+          docker run --rm lumir/remove-dockerhub-tag --user "$username" --password "$password" cloudb0x/autoscan:$tag
+
+  cleanup_pr:
+    if: startsWith(github.event.ref, 'refs/pull/') == true
+    env:
+      GH_EVENT_REF: ${{ github.event.ref }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Determine pull request tag
+        id: githubpr
+        shell: bash
+        run: echo "::set-output name=tag::${GH_EVENT_REF#refs/pull/}"
+
+      - name: Determine docker tag
+        uses: frabert/replace-string-action@master
+        id: dockertag
+        with:
+          pattern: '[:\.]+'
+          string: "${{ steps.githubpr.outputs.tag }}"
+          replace-with: '-'
+          flags: 'g'
+
+      - name: Remove pull request docker tag
+        shell: bash
+        env:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+          tag: ${{ steps.dockertag.outputs.replaced }}
+        run: |
+          docker run --rm lumir/remove-dockerhub-tag --user "$username" --password "$password" cloudb0x/autoscan:pr-$tag
+
diff --git a/README.md b/README.md
index 508c14e0..cf8daaa8 100644
--- a/README.md
+++ b/README.md
@@ -78,7 +78,7 @@ That's where rewrite rules come into play. They allow you to translate paths bet
 **Before you begin, make sure you understand how regular expressions work!** \
 Make sure you know how capture groups work, as these are used for the `to` field.
 
-Triggers can receive paths from any source: A remote server, a Docker container and the local file system. The `rewrite` field can be defined for each individual trigger. The `from` should be a regexp pattern describing the path from the trigger's perspective. The `to` should then convert this path into a path which is local to Autoscan.
+Triggers can receive paths from any source: a remote server, a Docker container, or the local file system. The `rewrite` field can be defined for each individual trigger and may contain multiple rewrite rules, so each rule must start with a `-` to mark the next item on the list. The `from` should be a regexp pattern describing the path from the trigger's perspective. The `to` should then convert this path into a path which is local to Autoscan.
 
 Targets work the other way around. They have to convert the path local to Autoscan to a path understood by the target, which can be a Docker container, remote server, etc. The `from` should be a regexp pattern describing the path from Autoscan's perspective. The `to` should then convert this path into a path which is local to the target.
@@ -96,24 +96,24 @@ The following config only defines rewrite paths, this should not be used directl
 triggers:
   sonarr:
     - rewrite:
-        # /tv contains folders with tv shows
-        # This path is used within the Sonarr Docker container
-        from: /tv/*
+        # /tv contains folders with tv shows
+        # This path is used within the Sonarr Docker container
+        - from: /tv/*
 
-        # /mnt/unionfs/Media/TV links to the same folder, though from the host OS
-        # This folder is accessed by Autoscan
-        to: /mnt/unionfs/Media/TV/$1
+          # /mnt/unionfs/Media/TV links to the same folder, though from the host OS
+          # This folder is accessed by Autoscan
+          to: /mnt/unionfs/Media/TV/$1
 
 targets:
   plex:
     - rewrite:
-        # Same folder as above, accessible by Autoscan.
-        # Note how we strip the "TV" part,
-        # as we want both Movies and TV.
-        from: /mnt/unionfs/Media/*
+        # Same folder as above, accessible by Autoscan.
+        # Note how we strip the "TV" part,
+        # as we want both Movies and TV.
+        - from: /mnt/unionfs/Media/*
 
-        # This path is used within the Plex Docker container
-        to: /data/$1
+          # This path is used within the Plex Docker container
+          to: /data/$1
 ```
 
 Let's take a look at the journey of the path `/tv/Westworld/Season 1/s01e01.mkv` coming from Sonarr.
@@ -135,7 +135,7 @@ We plan to support two kinds of triggers in GA:
 
 - Daemon processes. These triggers run in the background and fetch resources based on a cron schedule or in real-time. \
-  *Currently not available, but expected to arrive in GA.*
+  *Available, but bugs may still exist.*
 
 - Webhooks. These triggers expose HTTP handlers which can be added to the trigger's software. \
@@ -152,6 +152,14 @@ Each trigger consists of at least:
 - RegExp-based rewriting rules: translate a path given by the trigger to a path on the local file system. \
   *If the paths are identical between the trigger and the local file system, then the `rewrite` field should be ignored.*
 
+#### Daemons
+
+Daemons run in the background and continuously fetch new changes based on a [cron expression](https://crontab.guru).
+
+The following daemons are currently provided by Autoscan:
+
+- Google Drive
+
 #### Webhooks
 
 Webhooks, also known as HTTPTriggers internally, process HTTP requests on their exposed endpoints.
@@ -183,6 +191,26 @@ authentication:
   port: 3030
 
 triggers:
+  bernard:
+    - account: service-account.json
+      cron: "*/5 * * * *" # every five minutes (the "" are important)
+      priority: 0
+      drives:
+        - id: Shared Drive 1
+        - id: Shared Drive 2
+
+      # rewrite drive to the local filesystem
+      rewrite:
+        - from: ^/Media/*
+          to: /mnt/unionfs/Media/$1
+
+      # filter with regular expressions
+      include: # if set, then exclude is ignored
+        - "^/mnt/unionfs/Media/*"
+
+      exclude:
+        - '\.srt$'
+
   sonarr:
     - name: sonarr-docker # /triggers/sonarr-docker
       priority: 2
@@ -190,8 +218,8 @@ triggers:
       # Rewrite the path from within the container
       # to your local filesystem.
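       # Multiple rules may be listed here; they are tried from top
       # to bottom and the first matching "from" pattern wins.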
rewrite: - from: /tv/* - to: /mnt/unionfs/Media/TV/$1 + - from: /tv/* + to: /mnt/unionfs/Media/TV/$1 radarr: - name: radarr # /triggers/radarr @@ -299,8 +327,8 @@ targets: - url: https://plex.domain.tld # URL of your Plex server token: XXXX # Plex API Token rewrite: - from: /mnt/unionfs/Media/* # local file system - to: /data/$1 # path accessible by the Plex docker container (if applicable) + - from: /mnt/unionfs/Media/* # local file system + to: /data/$1 # path accessible by the Plex docker container (if applicable) ``` There are a couple of things to take note of in the config: @@ -321,8 +349,8 @@ targets: - url: https://emby.domain.tld # URL of your Emby server token: XXXX # Emby API Token rewrite: - from: /mnt/unionfs/Media/* # local file system - to: /data/$1 # path accessible by the Emby docker container (if applicable) + - from: /mnt/unionfs/Media/* # local file system + to: /data/$1 # path accessible by the Emby docker container (if applicable) ``` - URL. The URL can link to the docker container directly, the localhost or a reverse proxy sitting in front of Emby. @@ -366,8 +394,8 @@ triggers: # Rewrite the path from within the container # to your local filesystem. rewrite: - from: /tv/* - to: /mnt/unionfs/Media/TV/$1 + - from: /tv/* + to: /mnt/unionfs/Media/TV/$1 radarr: - name: radarr # /triggers/radarr @@ -385,15 +413,15 @@ targets: - url: https://plex.domain.tld # URL of your Plex server token: XXXX # Plex API Token rewrite: - from: /mnt/unionfs/Media/* # local file system - to: /data/$1 # path accessible by the Plex docker container (if applicable) + - from: /mnt/unionfs/Media/* # local file system + to: /data/$1 # path accessible by the Plex docker container (if applicable) emby: - url: https://emby.domain.tld # URL of your Emby server token: XXXX # Emby API Token rewrite: - from: /mnt/unionfs/Media/* # local file system - to: /data/$1 # path accessible by the Emby docker container (if applicable) + - from: /mnt/unionfs/Media/* # local file system + to: /data/$1 # path accessible by the Emby docker container (if applicable) ``` ## Other installation options @@ -423,7 +451,7 @@ Autoscan's Docker image provides various versions that are available via tags. T docker run \ --name=autoscan \ -e "PUID=1000" \ - -e "PGID=1000" \ + -e "PGID=1001" \ -p 3030:3030 \ -v "/opt/autoscan:/config" \ -v "/mnt/unionfs:/mnt/unionfs:ro" \ diff --git a/autoscan.go b/autoscan.go index b912144c..189365d6 100644 --- a/autoscan.go +++ b/autoscan.go @@ -4,6 +4,7 @@ import ( "errors" "net/http" "regexp" + "time" ) // A Scan is at the core of Autoscan. 
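The next hunks rework `NewRewriter` to take a slice of `Rewrite` rules instead of a single pair. A rough usage sketch of the new first-match-wins behaviour (the second rule is illustrative, not part of this PR):

```go
package main

import (
	"fmt"

	"github.com/cloudbox/autoscan"
)

func main() {
	// Rules are compiled up front and tried in order;
	// the first "From" pattern that matches wins.
	rewriter, err := autoscan.NewRewriter([]autoscan.Rewrite{
		{From: "/tv/*", To: "/mnt/unionfs/Media/TV/$1"},
		{From: "/movies/*", To: "/mnt/unionfs/Media/Movies/$1"},
	})
	if err != nil {
		panic(err)
	}

	// matched by the first rule:
	fmt.Println(rewriter("/tv/Westworld/Season 1/s01e01.mkv"))
	// no rule matches, so the input passes through unchanged:
	fmt.Println(rewriter("/music/Album/track.mp3"))
}
```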
@@ -15,6 +16,8 @@ type Scan struct { File string Priority int Retries int + Removed bool + Time time.Time } type ProcessorFunc func(...Scan) error @@ -71,22 +74,25 @@ type Rewrite struct { type Rewriter func(string) string -func NewRewriter(r Rewrite) (Rewriter, error) { - if r.From == "" || r.To == "" { - rewriter := func(input string) string { - return input +func NewRewriter(rewriteRules []Rewrite) (Rewriter, error) { + var rewrites []regexp.Regexp + for _, rule := range rewriteRules { + re, err := regexp.Compile(rule.From) + if err != nil { + return nil, err } - return rewriter, nil - } - - re, err := regexp.Compile(r.From) - if err != nil { - return nil, err + rewrites = append(rewrites, *re) } rewriter := func(input string) string { - return re.ReplaceAllString(input, r.To) + for i, r := range rewrites { + if r.MatchString(input) { + return r.ReplaceAllString(input, rewriteRules[i].To) + } + } + + return input } return rewriter, nil diff --git a/autoscan_test.go b/autoscan_test.go index 64458cd7..abe1ae44 100644 --- a/autoscan_test.go +++ b/autoscan_test.go @@ -7,8 +7,7 @@ import ( func TestRewriter(t *testing.T) { type Test struct { Name string - From string - To string + Rewrites []Rewrite Input string Expected string } @@ -16,40 +15,68 @@ func TestRewriter(t *testing.T) { var testCases = []Test{ { Name: "One parameter with wildcard", - From: "/mnt/unionfs/Media/*", - To: "/data/$1", Input: "/mnt/unionfs/Media/Movies/Example Movie/movie.mkv", Expected: "/data/Movies/Example Movie/movie.mkv", + Rewrites: []Rewrite{{ + From: "/mnt/unionfs/Media/*", + To: "/data/$1", + }}, }, { Name: "One parameter with glob thingy", - From: "/Media/(.*)", - To: "/data/$1", Input: "/Media/Movies/test.mkv", Expected: "/data/Movies/test.mkv", + Rewrites: []Rewrite{{ + From: "/Media/(.*)", + To: "/data/$1", + }}, }, { Name: "No wildcard", - From: "^/Media/", - To: "/$1", Input: "/Media/whatever", Expected: "/whatever", + Rewrites: []Rewrite{{ + From: "^/Media/", + To: "/$1", + }}, }, { Name: "Unicode (PAS issue #73)", - From: "/media/b33f/saitoh183/private/*", - To: "/$1", Input: "/media/b33f/saitoh183/private/Videos/FrenchTV/L'échappée/Season 03", Expected: "/Videos/FrenchTV/L'échappée/Season 03", + Rewrites: []Rewrite{{ + From: "/media/b33f/saitoh183/private/*", + To: "/$1", + }}, + }, + { + Name: "Returns input when no rules are given", + Input: "/mnt/unionfs/test/example.mp4", + Expected: "/mnt/unionfs/test/example.mp4", + }, + { + Name: "Returns input when rule does not match", + Input: "/test/example.mp4", + Expected: "/test/example.mp4", + Rewrites: []Rewrite{{ + From: "^/Media/*", + To: "/mnt/unionfs/Media/$1", + }}, + }, + { + Name: "Uses second rule if first one does not match", + Input: "/test/example.mp4", + Expected: "/mnt/unionfs/example.mp4", + Rewrites: []Rewrite{ + {From: "^/Media/*", To: "/mnt/unionfs/Media/$1"}, + {From: "^/test/*", To: "/mnt/unionfs/$1"}, + }, }, } for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - rewriter, err := NewRewriter(Rewrite{ - From: tc.From, - To: tc.To, - }) + rewriter, err := NewRewriter(tc.Rewrites) if err != nil { t.Fatal(err) diff --git a/cmd/autoscan/main.go b/cmd/autoscan/main.go index 1e13c946..49db1cfd 100644 --- a/cmd/autoscan/main.go +++ b/cmd/autoscan/main.go @@ -20,6 +20,7 @@ import ( "github.com/cloudbox/autoscan/processor" "github.com/cloudbox/autoscan/targets/plex" "github.com/cloudbox/autoscan/triggers" + "github.com/cloudbox/autoscan/triggers/bernard" "github.com/cloudbox/autoscan/triggers/lidarr" 
"github.com/cloudbox/autoscan/triggers/radarr" "github.com/cloudbox/autoscan/triggers/sonarr" @@ -41,9 +42,10 @@ type config struct { // autoscan.HTTPTrigger Triggers struct { - Lidarr []lidarr.Config `yaml:"lidarr"` - Radarr []radarr.Config `yaml:"radarr"` - Sonarr []sonarr.Config `yaml:"sonarr"` + Bernard []bernard.Config `yaml:"bernard"` + Lidarr []lidarr.Config `yaml:"lidarr"` + Radarr []radarr.Config `yaml:"radarr"` + Sonarr []sonarr.Config `yaml:"sonarr"` } `yaml:"triggers"` // autoscan.Target @@ -184,7 +186,24 @@ func main() { log.Warn().Msg("Webhooks running without authentication") } - // triggers + // Daemon Triggers + for _, t := range c.Triggers.Bernard { + if t.DatastorePath == "" { + t.DatastorePath = cli.Database + } + + trigger, err := bernard.New(t) + if err != nil { + log.Fatal(). + Err(err). + Str("trigger", "bernard"). + Msg("Failed initialising trigger") + } + + go trigger(proc.Add) + } + + // HTTP Triggers for _, t := range c.Triggers.Lidarr { trigger, err := lidarr.New(t) if err != nil { @@ -234,6 +253,7 @@ func main() { }() log.Info(). + Int("bernard", len(c.Triggers.Bernard)). Int("lidarr", len(c.Triggers.Lidarr)). Int("sonarr", len(c.Triggers.Sonarr)). Int("radarr", len(c.Triggers.Radarr)). @@ -294,9 +314,9 @@ func main() { default: log.Error(). Err(err). - Msg("Not all targets are available, retrying in 5 seconds...") + Msg("Not all targets are available, retrying in 15 seconds...") - time.Sleep(5 * time.Second) + time.Sleep(15 * time.Second) continue } } @@ -327,6 +347,11 @@ func main() { case errors.Is(err, autoscan.ErrTargetUnavailable): targetsAvailable = false + log.Error(). + Err(err). + Msg("Not all targets are available, retrying in 15 seconds...") + + time.Sleep(15 * time.Second) default: // unexpected error diff --git a/go.mod b/go.mod index 438fce92..e8189c0c 100644 --- a/go.mod +++ b/go.mod @@ -7,12 +7,16 @@ require ( github.com/alecthomas/kong v0.2.11 github.com/justinas/alice v1.2.0 github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f - github.com/mattn/go-sqlite3 v1.14.0 + github.com/m-rots/bernard v0.3.3-0.20200804121414-38394a889536 + github.com/m-rots/stubbs v1.0.1-0.20200804111142-55ef5d1857d9 + github.com/mattn/go-sqlite3 v2.0.3+incompatible github.com/natefinch/lumberjack v2.0.0+incompatible github.com/pkg/errors v0.9.1 // indirect + github.com/robfig/cron/v3 v3.0.1 github.com/rs/zerolog v1.19.0 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c + golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.3.0 ) diff --git a/go.sum b/go.sum index 77246bd2..e6a105fb 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= +github.com/alecthomas/kong v0.2.9/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE= github.com/alecthomas/kong v0.2.11 h1:RKeJXXWfg9N47RYfMm0+igkxBCTF4bzbneAxaqid0c4= github.com/alecthomas/kong v0.2.11/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE= -github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -11,8 +10,14 @@ github.com/justinas/alice v1.2.0 h1:+MHSA/vccVCF4Uq37S42jwlkvI2Xzl7zTPCN5BnZNVo= github.com/justinas/alice v1.2.0/go.mod h1:fN5HRH/reO/zrUflLfTN43t3vXvKzvZIENsNEe7i7qA= github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f h1:dKccXx7xA56UNqOcFIbuqFjAWPVtP688j5QMgmo6OHU= github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f/go.mod h1:4rEELDSfUAlBSyUjPG0JnaNGjf13JySHFeRdD/3dLP0= -github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA= -github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= +github.com/m-rots/bernard v0.3.3-0.20200804121414-38394a889536 h1:0aY4VrX9R+Y0VArW9EyeZF5JJws4w6aLf7V/YdVHOTQ= +github.com/m-rots/bernard v0.3.3-0.20200804121414-38394a889536/go.mod h1:yDQffALXQDh6sTXdFCbI2rJtYuXgx41MyJM6Sf/j7Sc= +github.com/m-rots/stubbs v1.0.0 h1:lBrjn27J32/iGHp7eKPYGcphuqDIg5UIs/YI4q1m63Q= +github.com/m-rots/stubbs v1.0.0/go.mod h1:iDS6z2oonw2UMo2l0S1WTPJ9git7FWU4YEo6fq7F2WU= +github.com/m-rots/stubbs v1.0.1-0.20200804111142-55ef5d1857d9 h1:ent/Dhpzz7hiCBiDgJL2iS5G0Yzd/a9GpP6XSOet0Qc= +github.com/m-rots/stubbs v1.0.1-0.20200804111142-55ef5d1857d9/go.mod h1:Ive+DY/P1EikQ644M3tuyvsO/7ohPLnmEru2L+6hbVw= +github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= +github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= @@ -21,6 +26,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.19.0 h1:hYz4ZVdUgjXTBUmrkrw55j1nHx68LfOKIQk5IYtyScg= @@ -28,19 +35,16 @@ github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c h1:UIcGWL6/wpCfyGuJnRFJRurA+yj8RrW7Q6x2YMCXt6c= golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/processor/datastore.go b/processor/datastore.go index 7c2fc667..4af748e2 100644 --- a/processor/datastore.go +++ b/processor/datastore.go @@ -22,6 +22,7 @@ CREATE TABLE IF NOT EXISTS scan ( "priority" INTEGER NOT NULL, "time" DATETIME NOT NULL, "retries" INTEGER NOT NULL, + "removed" BOOLEAN NOT NULL, PRIMARY KEY(folder, file) ) ` @@ -45,16 +46,17 @@ func newDatastore(path string) (*datastore, error) { } const sqlUpsert = ` -INSERT INTO scan (folder, file, priority, time, retries) -VALUES (?, ?, ?, ?, ?) +INSERT INTO scan (folder, file, priority, time, retries, removed) +VALUES (?, ?, ?, ?, ?, ?) 
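+-- on conflict, keep the highest priority seen so far, refresh time and
+-- retries from the newest scan, and only keep removed=true when both the
+-- existing row and the new scan agree (boolean MIN acts as a logical AND)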
ON CONFLICT (folder, file) DO UPDATE SET priority = MAX(excluded.priority, scan.priority), time = excluded.time, - retries = excluded.retries + retries = excluded.retries, + removed = min(excluded.removed, scan.removed) ` func (store datastore) upsert(tx *sql.Tx, scan autoscan.Scan) error { - _, err := tx.Exec(sqlUpsert, scan.Folder, scan.File, scan.Priority, now(), scan.Retries) + _, err := tx.Exec(sqlUpsert, scan.Folder, scan.File, scan.Priority, scan.Time, scan.Retries, scan.Removed) return err } @@ -78,7 +80,7 @@ func (store datastore) Upsert(scans []autoscan.Scan) error { } const sqlGetMatching = ` -SELECT folder, file, priority, retries FROM scan +SELECT folder, file, priority, retries, removed, time FROM scan WHERE folder = ( SELECT folder FROM scan @@ -102,7 +104,7 @@ func (store datastore) GetMatching(minAge time.Duration) (scans []autoscan.Scan, defer rows.Close() for rows.Next() { scan := autoscan.Scan{} - err = rows.Scan(&scan.Folder, &scan.File, &scan.Priority, &scan.Retries) + err = rows.Scan(&scan.Folder, &scan.File, &scan.Priority, &scan.Retries, &scan.Removed, &scan.Time) if err != nil { return scans, err } @@ -165,7 +167,7 @@ func (store datastore) Retry(folder string, maxRetries int) error { } const sqlGetAll = ` -SELECT folder, file, priority, retries FROM scan +SELECT folder, file, priority, retries, removed, time FROM scan ` func (store datastore) GetAll() (scans []autoscan.Scan, err error) { @@ -181,7 +183,7 @@ func (store datastore) GetAll() (scans []autoscan.Scan, err error) { defer rows.Close() for rows.Next() { scan := autoscan.Scan{} - err = rows.Scan(&scan.Folder, &scan.File, &scan.Priority, &scan.Retries) + err = rows.Scan(&scan.Folder, &scan.File, &scan.Priority, &scan.Retries, &scan.Removed, &scan.Time) if err != nil { return scans, err } diff --git a/processor/datastore_test.go b/processor/datastore_test.go index 25e8623e..7c7d41b1 100644 --- a/processor/datastore_test.go +++ b/processor/datastore_test.go @@ -13,44 +13,8 @@ import ( _ "github.com/mattn/go-sqlite3" ) -type ScanWithTime struct { - Scan autoscan.Scan - Time time.Time -} - -const sqlGetAllWithTime = ` -SELECT folder, file, priority, retries, time FROM scan -` - -func (store datastore) GetAllWithTime() (scans []ScanWithTime, err error) { - rows, err := store.db.Query(sqlGetAllWithTime) - if errors.Is(err, sql.ErrNoRows) { - return scans, nil - } - - if err != nil { - return scans, err - } - - defer rows.Close() - for rows.Next() { - withTime := ScanWithTime{} - scan := &withTime.Scan - - err = rows.Scan(&scan.Folder, &scan.File, &scan.Priority, &scan.Retries, &withTime.Time) - if err != nil { - return scans, err - } - - withTime.Time = withTime.Time.UTC() - scans = append(scans, withTime) - } - - return scans, rows.Err() -} - const sqlGetScan = ` -SELECT folder, file, priority, time, retries FROM scan +SELECT folder, file, priority, time, retries, removed FROM scan WHERE folder = $1 AND file = $2 ` @@ -59,7 +23,7 @@ func GetScan(t *testing.T, db *sql.DB, folder string, file string) (scan autosca row := db.QueryRow(sqlGetScan, folder, file) - err := row.Scan(&scan.Folder, &scan.File, &scan.Priority, &scanTime, &scan.Retries) + err := row.Scan(&scan.Folder, &scan.File, &scan.Priority, &scanTime, &scan.Retries, &scan.Removed) if err != nil { t.Fatalf("Could not scan the row: %v", err) } @@ -88,6 +52,8 @@ func TestUpsert(t *testing.T) { File: "test.mkv", Priority: 5, Retries: 2, + Removed: true, + Time: time.Time{}.Add(1), }, }, Want: Want{ @@ -97,6 +63,7 @@ func TestUpsert(t *testing.T) { File: 
"test.mkv", Priority: 5, Retries: 2, + Removed: true, }, }, }, @@ -105,12 +72,15 @@ func TestUpsert(t *testing.T) { Scans: []autoscan.Scan{ { Priority: 2, + Time: time.Time{}.Add(1), }, { Priority: 5, + Time: time.Time{}.Add(2), }, { Priority: 3, + Time: time.Time{}.Add(3), }, }, Want: Want{ @@ -120,6 +90,29 @@ func TestUpsert(t *testing.T) { }, }, }, + { + Name: "Removed should remain false on upsert", + Scans: []autoscan.Scan{ + { + Removed: true, + Time: time.Time{}.Add(1), + }, + { + Removed: false, + Time: time.Time{}.Add(2), + }, + { + Removed: true, + Time: time.Time{}.Add(3), + }, + }, + Want: Want{ + Time: time.Time{}.Add(3), + Scan: autoscan.Scan{ + Removed: false, + }, + }, + }, } for _, tc := range testCases { @@ -129,12 +122,6 @@ func TestUpsert(t *testing.T) { t.Fatal(err) } - var currentTime time.Time - now = func() time.Time { - currentTime = currentTime.Add(1) - return currentTime - } - err = store.Upsert(tc.Scans) if err != nil { t.Fatal(err) @@ -159,78 +146,67 @@ func TestGetMatching(t *testing.T) { Name string Now time.Time MinAge time.Duration - Scans []ScanWithTime + Scans []autoscan.Scan Want []autoscan.Scan } - testTime := time.Now() + testTime := time.Now().UTC() var testCases = []Test{ { Name: "Retrieves no items if all items are too young", Now: testTime, MinAge: 2 * time.Minute, - Scans: []ScanWithTime{ - { - Time: testTime.Add(-1 * time.Minute), - Scan: autoscan.Scan{ - File: "1", - }, - }, - { - Time: testTime.Add(-1 * time.Minute), - Scan: autoscan.Scan{ - File: "2", - }, - }, + Scans: []autoscan.Scan{ + {File: "1", Time: testTime.Add(-1 * time.Minute)}, + {File: "2", Time: testTime.Add(-1 * time.Minute)}, }, }, { Name: "Retrieves no items if some items are too young", Now: testTime, MinAge: 9 * time.Minute, - Scans: []ScanWithTime{ - {autoscan.Scan{File: "1"}, testTime.Add(-8 * time.Minute)}, - {autoscan.Scan{File: "2"}, testTime.Add(-10 * time.Minute)}, + Scans: []autoscan.Scan{ + {File: "1", Time: testTime.Add(-8 * time.Minute)}, + {File: "2", Time: testTime.Add(-10 * time.Minute)}, }, }, { Name: "Retrieves all items if all items are older than minimum age minutes", Now: testTime, MinAge: 5 * time.Minute, - Scans: []ScanWithTime{ - {autoscan.Scan{File: "1"}, testTime.Add(-6 * time.Minute)}, - {autoscan.Scan{File: "2"}, testTime.Add(-6 * time.Minute)}, + Scans: []autoscan.Scan{ + {File: "1", Time: testTime.Add(-6 * time.Minute)}, + {File: "2", Time: testTime.Add(-6 * time.Minute)}, }, Want: []autoscan.Scan{ - {File: "1"}, - {File: "2"}, + {File: "1", Time: testTime.Add(-6 * time.Minute)}, + {File: "2", Time: testTime.Add(-6 * time.Minute)}, }, }, { Name: "Retrieves only one folder if all items are older than minimum age minutes", Now: testTime, MinAge: 5 * time.Minute, - Scans: []ScanWithTime{ - {autoscan.Scan{Folder: "folder 1", File: "1"}, testTime.Add(-6 * time.Minute)}, - {autoscan.Scan{Folder: "folder 2", File: "1"}, testTime.Add(-6 * time.Minute)}, + Scans: []autoscan.Scan{ + {Folder: "folder 1", File: "1", Time: testTime.Add(-6 * time.Minute)}, + {Folder: "folder 2", File: "1", Time: testTime.Add(-6 * time.Minute)}, }, Want: []autoscan.Scan{ - {Folder: "folder 1", File: "1"}, + {Folder: "folder 1", File: "1", Time: testTime.Add(-6 * time.Minute)}, }, }, { Name: "Returns all fields", Now: testTime, MinAge: 5 * time.Minute, - Scans: []ScanWithTime{ + Scans: []autoscan.Scan{ { - Time: testTime.Add(-6 * time.Minute), - Scan: autoscan.Scan{ - Folder: "Amazing folder", - File: "Wholesome file", - Priority: 69, - }, + Folder: "Amazing folder", + File: 
"Wholesome file", + Priority: 69, + Removed: true, + Time: testTime.Add(-6 * time.Minute), }, }, Want: []autoscan.Scan{ @@ -238,6 +214,8 @@ func TestGetMatching(t *testing.T) { Folder: "Amazing folder", File: "Wholesome file", Priority: 69, + Removed: true, + Time: testTime.Add(-6 * time.Minute), }, }, }, @@ -258,14 +236,8 @@ func TestGetMatching(t *testing.T) { t.Fatal(err) } - var scanTime time.Time - now = func() time.Time { - return scanTime - } - for _, scan := range tc.Scans { - scanTime = scan.Time - err = store.upsert(tx, scan.Scan) + err = store.upsert(tx, scan) if err != nil { t.Fatal(err) } @@ -287,6 +259,7 @@ func TestGetMatching(t *testing.T) { if !reflect.DeepEqual(scans, tc.Want) { t.Log(scans) + t.Log(tc.Want) t.Errorf("Scans do not match") } }) @@ -300,7 +273,7 @@ func TestRetries(t *testing.T) { Folder string Retries int Scans []autoscan.Scan - Want []ScanWithTime + Want []autoscan.Scan } testTime := time.Now().UTC() @@ -315,14 +288,14 @@ func TestRetries(t *testing.T) { Folder: "1", Retries: 5, Scans: []autoscan.Scan{ - {Folder: "1", File: "1", Retries: 0}, - {Folder: "1", File: "2", Retries: 2}, - {Folder: "2", File: "1", Retries: 0}, + {Folder: "1", File: "1", Retries: 0, Time: testTime}, + {Folder: "1", File: "2", Retries: 2, Time: testTime}, + {Folder: "2", File: "1", Retries: 0, Time: testTime}, }, - Want: []ScanWithTime{ - {autoscan.Scan{Folder: "1", File: "1", Retries: 1}, testTime.Add(5 * time.Minute)}, - {autoscan.Scan{Folder: "1", File: "2", Retries: 3}, testTime.Add(5 * time.Minute)}, - {autoscan.Scan{Folder: "2", File: "1", Retries: 0}, testTime.Add(0 * time.Minute)}, + Want: []autoscan.Scan{ + {Folder: "1", File: "1", Retries: 1, Time: testTime.Add(5 * time.Minute)}, + {Folder: "1", File: "2", Retries: 3, Time: testTime.Add(5 * time.Minute)}, + {Folder: "2", File: "1", Retries: 0, Time: testTime.Add(0 * time.Minute)}, }, }, { @@ -330,11 +303,11 @@ func TestRetries(t *testing.T) { Folder: "1", Retries: 2, Scans: []autoscan.Scan{ - {Folder: "1", File: "1", Retries: 1}, - {Folder: "1", File: "2", Retries: 2}, + {Folder: "1", File: "1", Retries: 1, Time: testTime}, + {Folder: "1", File: "2", Retries: 2, Time: testTime}, }, - Want: []ScanWithTime{ - {autoscan.Scan{Folder: "1", File: "1", Retries: 2}, testTime.Add(5 * time.Minute)}, + Want: []autoscan.Scan{ + {Folder: "1", File: "1", Retries: 2, Time: testTime.Add(5 * time.Minute)}, }, }, } @@ -346,10 +319,6 @@ func TestRetries(t *testing.T) { t.Fatal(err) } - now = func() time.Time { - return testTime - } - err = store.Upsert(tc.Scans) if err != nil { t.Fatal(err) @@ -364,7 +333,7 @@ func TestRetries(t *testing.T) { t.Fatal(err) } - scans, err := store.GetAllWithTime() + scans, err := store.GetAll() if err != nil { t.Fatal(err) } diff --git a/processor/processor.go b/processor/processor.go index b12aad56..2f7da72a 100644 --- a/processor/processor.go +++ b/processor/processor.go @@ -95,7 +95,7 @@ func (p *Processor) Process(targets []autoscan.Target) error { // We do not want to try to scan non-existing files. 
var existingScans []autoscan.Scan for _, scan := range scans { - if fileExists(path.Join(scan.Folder, scan.File)) { + if scan.Removed != fileExists(path.Join(scan.Folder, scan.File)) { existingScans = append(existingScans, scan) } } diff --git a/targets/emby/emby.go b/targets/emby/emby.go index 9695a335..87f7f883 100644 --- a/targets/emby/emby.go +++ b/targets/emby/emby.go @@ -9,10 +9,10 @@ import ( ) type Config struct { - URL string `yaml:"url"` - Token string `yaml:"token"` - Rewrite autoscan.Rewrite `yaml:"rewrite"` - Verbosity string `yaml:"verbosity"` + URL string `yaml:"url"` + Token string `yaml:"token"` + Rewrite []autoscan.Rewrite `yaml:"rewrite"` + Verbosity string `yaml:"verbosity"` } type target struct { diff --git a/targets/plex/plex.go b/targets/plex/plex.go index adb3da2c..4ab1f692 100644 --- a/targets/plex/plex.go +++ b/targets/plex/plex.go @@ -10,10 +10,10 @@ import ( ) type Config struct { - URL string `yaml:"url"` - Token string `yaml:"token"` - Rewrite autoscan.Rewrite `yaml:"rewrite"` - Verbosity string `yaml:"verbosity"` + URL string `yaml:"url"` + Token string `yaml:"token"` + Rewrite []autoscan.Rewrite `yaml:"rewrite"` + Verbosity string `yaml:"verbosity"` } type target struct { diff --git a/triggers/bernard/bernard.go b/triggers/bernard/bernard.go new file mode 100644 index 00000000..22ebbcc5 --- /dev/null +++ b/triggers/bernard/bernard.go @@ -0,0 +1,421 @@ +package bernard + +import ( + "errors" + "fmt" + "github.com/robfig/cron/v3" + "path/filepath" + "time" + + "github.com/cloudbox/autoscan" + lowe "github.com/m-rots/bernard" + ds "github.com/m-rots/bernard/datastore" + "github.com/m-rots/bernard/datastore/sqlite" + "github.com/m-rots/stubbs" + "github.com/rs/zerolog" +) + +const ( + maxSyncRetries = 5 +) + +type Config struct { + AccountPath string `yaml:"account"` + CronSchedule string `yaml:"cron"` + DatastorePath string `yaml:"database"` + Priority int `yaml:"priority"` + TimeOffset time.Duration `yaml:"time-offset"` + Verbosity string `yaml:"verbosity"` + Rewrite []autoscan.Rewrite `yaml:"rewrite"` + Include []string `yaml:"include"` + Exclude []string `yaml:"exclude"` + Drives []struct { + ID string `yaml:"id"` + TimeOffset time.Duration `yaml:"time-offset"` + Rewrite []autoscan.Rewrite `yaml:"rewrite"` + Include []string `yaml:"include"` + Exclude []string `yaml:"exclude"` + } `yaml:"drives"` +} + +func New(c Config) (autoscan.Trigger, error) { + l := autoscan.GetLogger(c.Verbosity).With(). + Str("trigger", "bernard"). 
+ Logger() + + const scope = "https://www.googleapis.com/auth/drive.readonly" + auth, err := stubbs.FromFile(c.AccountPath, []string{scope}, 3600) + if err != nil { + return nil, fmt.Errorf("%v: %w", err, autoscan.ErrFatal) + } + + store, err := sqlite.New(c.DatastorePath) + if err != nil { + return nil, fmt.Errorf("%v: %w", err, autoscan.ErrFatal) + } + + limiter, err := getRateLimiter(auth.Email()) + if err != nil { + return nil, fmt.Errorf("%v: %w", err, autoscan.ErrFatal) + } + + bernard := lowe.New(auth, store, + lowe.WithPreRequestHook(limiter.Wait), + lowe.WithSafeSleep(120*time.Second)) + + var drives []drive + for _, d := range c.Drives { + d := d + + rewriter, err := autoscan.NewRewriter(append(d.Rewrite, c.Rewrite...)) + if err != nil { + return nil, err + } + + filterer, err := newFilterer(append(d.Include, c.Include...), append(d.Exclude, c.Exclude...)) + if err != nil { + return nil, err + } + + scanTime := func() time.Time { + if d.TimeOffset.Seconds() > 0 { + return time.Now().Add(d.TimeOffset) + } + return time.Now().Add(c.TimeOffset) + } + + drives = append(drives, drive{ + ID: d.ID, + Rewriter: rewriter, + Allowed: filterer, + ScanTime: scanTime, + }) + } + + trigger := func(callback autoscan.ProcessorFunc) { + d := daemon{ + log: l, + callback: callback, + cronSchedule: c.CronSchedule, + priority: c.Priority, + drives: drives, + bernard: bernard, + store: &bds{store}, + limiter: limiter, + } + + // start job(s) + if err := d.StartAutoSync(); err != nil { + l.Error(). + Err(err). + Msg("Failed initialising cron jobs") + return + } + } + + return trigger, nil +} + +type drive struct { + ID string + Rewriter autoscan.Rewriter + Allowed filterer + ScanTime func() time.Time +} + +type daemon struct { + callback autoscan.ProcessorFunc + cronSchedule string + priority int + drives []drive + bernard *lowe.Bernard + store *bds + log zerolog.Logger + limiter *rateLimiter +} + +type syncJob struct { + log zerolog.Logger + attempts int + errors []error + + cron *cron.Cron + jobID cron.EntryID + fn func() error +} + +func (s *syncJob) Run() { + // increase attempt counter + s.attempts++ + + // run job + err := s.fn() + + // handle job response + switch { + case err == nil: + // job completed successfully + s.attempts = 0 + s.errors = s.errors[:0] + return + + case errors.Is(err, lowe.ErrInvalidCredentials), errors.Is(err, ds.ErrDataAnomaly), errors.Is(err, lowe.ErrNetwork): + //retryable error occurred + s.log.Trace(). + Err(err). + Int("attempts", s.attempts). + Msg("Retryable error occurred while syncing drive") + s.errors = append(s.errors, err) + + case errors.Is(err, autoscan.ErrFatal): + // fatal error occurred, we cannot recover from this safely + s.log.Error(). + Err(err). + Msg("Fatal error occurred while syncing drive, drive has been stopped...") + + s.cron.Remove(s.jobID) + return + + case err != nil: + // an un-expected/un-handled error occurred, this should be retryable with the same retry logic + s.log.Warn(). + Err(err). + Int("attempts", s.attempts). + Msg("Unexpected error occurred while syncing drive") + s.errors = append(s.errors, err) + } + + // abort if max retries reached + if s.attempts >= maxSyncRetries { + s.log.Error(). + Errs("error", s.errors). + Int("attempts", s.attempts). 
+ Msg("Consecutive errors occurred while syncing drive, drive has been stopped...") + + s.cron.Remove(s.jobID) + } +} + +func newSyncJob(c *cron.Cron, log zerolog.Logger, job func() error) *syncJob { + return &syncJob{ + log: log, + attempts: 0, + errors: make([]error, 0), + cron: c, + fn: job, + } +} + +func (d daemon) StartAutoSync() error { + c := cron.New() + + for _, drive := range d.drives { + drive := drive + fullSync := false + l := d.withDriveLog(drive.ID) + + // full sync required? + _, err := d.store.PageToken(drive.ID) + switch { + case errors.Is(err, ds.ErrFullSync): + fullSync = true + case err != nil: + return fmt.Errorf("%v: determining if full sync required: %v: %w", + drive.ID, err, autoscan.ErrFatal) + } + + // create job + job := newSyncJob(c, l, func() error { + // acquire lock + if err := d.limiter.Acquire(1); err != nil { + return fmt.Errorf("%v: acquiring sync semaphore: %v: %w", + drive.ID, err, autoscan.ErrFatal) + } + defer d.limiter.Release(1) + + // full sync + if fullSync { + l.Info().Msg("Starting full sync") + start := time.Now() + + if err := d.bernard.FullSync(drive.ID); err != nil { + return fmt.Errorf("%v: performing full sync: %w", drive.ID, err) + } + + l.Info().Msgf("Finished full sync in %s", time.Since(start)) + fullSync = false + return nil + } + + // create partial sync + dh, diff := d.store.NewDifferencesHook() + ph := NewPostProcessBernardDiff(drive.ID, d.store, diff) + ch, paths := NewPathsHook(drive.ID, d.store, diff) + + l.Trace().Msg("Running partial sync") + start := time.Now() + + // do partial sync + err := d.bernard.PartialSync(drive.ID, dh, ph, ch) + if err != nil { + return fmt.Errorf("%v: performing partial sync: %w", drive.ID, err) + } + + l.Trace(). + Int("files_added", len(paths.AddedFiles)). + Int("files_changed", len(paths.ChangedFiles)). + Int("files_removed", len(paths.RemovedFiles)). + Msgf("Partial sync finished in %s", time.Since(start)) + + // translate paths to scan task + task := d.getScanTask(&(drive), paths) + + // move scans to processor + if len(task.scans) > 0 { + l.Trace(). + Interface("scans", task.scans). + Msg("Scans moving to processor") + + err := d.callback(task.scans...) + if err != nil { + return fmt.Errorf("%v: moving scans to processor: %v: %w", + drive.ID, err, autoscan.ErrFatal) + } + + l.Info(). + Int("files_added", task.adds). + Int("files_changed", task.changes). + Int("files_removed", task.removes). + Msg("Scan moved to processor") + } + + return nil + }) + + id, err := c.AddJob(d.cronSchedule, cron.NewChain(cron.SkipIfStillRunning(cron.DiscardLogger)).Then(job)) + if err != nil { + return fmt.Errorf("%v: creating auto sync job for drive: %w", drive.ID, err) + } + + job.jobID = id + } + + c.Start() + return nil +} + +type scanTask struct { + scans []autoscan.Scan + adds int + changes int + removes int +} + +func (d daemon) getScanTask(drive *drive, paths *Paths) *scanTask { + pathMap := make(map[string]int) + task := &scanTask{ + scans: make([]autoscan.Scan, 0), + adds: 0, + changes: 0, + removes: 0, + } + + for _, p := range paths.AddedFiles { + // rewrite path + rewritten := drive.Rewriter(p) + + // check if path already seen + if _, ok := pathMap[rewritten]; ok { + // already a scan task present + continue + } else { + pathMap[rewritten] = 1 + } + + // is this path allowed? 
+ if !drive.Allowed(rewritten) { + continue + } + + // add scan task + dir, file := filepath.Split(rewritten) + task.scans = append(task.scans, autoscan.Scan{ + Folder: filepath.Clean(dir), + File: file, + Priority: d.priority, + Retries: 0, + Removed: false, + Time: drive.ScanTime(), + }) + + task.adds++ + } + + for _, p := range paths.ChangedFiles { + // rewrite path + rewritten := drive.Rewriter(p) + + // check if path already seen + if _, ok := pathMap[rewritten]; ok { + // already a scan task present + continue + } else { + pathMap[rewritten] = 1 + } + + // is this path allowed? + if !drive.Allowed(rewritten) { + continue + } + + // add scan task + dir, file := filepath.Split(filepath.Clean(rewritten)) + task.scans = append(task.scans, autoscan.Scan{ + Folder: filepath.Clean(dir), + File: file, + Priority: d.priority, + Retries: 0, + Removed: false, + Time: drive.ScanTime(), + }) + + task.changes++ + } + + for _, p := range paths.RemovedFiles { + // rewrite path + rewritten := drive.Rewriter(p) + + // check if path already seen + if _, ok := pathMap[rewritten]; ok { + // already a scan task present + continue + } else { + pathMap[rewritten] = 1 + } + + // is this path allowed? + if !drive.Allowed(rewritten) { + continue + } + + // add scan task + dir, file := filepath.Split(rewritten) + task.scans = append(task.scans, autoscan.Scan{ + Folder: filepath.Clean(dir), + File: file, + Priority: d.priority, + Retries: 0, + Removed: true, + Time: drive.ScanTime(), + }) + + task.removes++ + } + + return task +} + +func (d daemon) withDriveLog(driveID string) zerolog.Logger { + return d.log.With().Str("drive_id", driveID).Logger() +} diff --git a/triggers/bernard/datastore.go b/triggers/bernard/datastore.go new file mode 100644 index 00000000..fc06ca21 --- /dev/null +++ b/triggers/bernard/datastore.go @@ -0,0 +1,166 @@ +package bernard + +import ( + "database/sql" + "errors" + "fmt" + "github.com/m-rots/bernard/datastore" + "github.com/m-rots/bernard/datastore/sqlite" +) + +type bds struct { + *sqlite.Datastore +} + +const sqlSelectFile = `SELECT id, name, parent, size, md5, trashed FROM file WHERE drive = $1 AND id = $2 LIMIT 1` + +func (d *bds) GetFile(driveID string, fileID string) (*datastore.File, error) { + f := new(datastore.File) + + row := d.DB.QueryRow(sqlSelectFile, driveID, fileID) + err := row.Scan(&f.ID, &f.Name, &f.Parent, &f.Size, &f.MD5, &f.Trashed) + + switch { + case errors.Is(err, sql.ErrNoRows): + return nil, fmt.Errorf("%v: file not found: %w", fileID, sql.ErrNoRows) + case err != nil: + return nil, err + default: + break + } + + return f, nil +} + +const sqlSelectFolder = `SELECT id, name, trashed, parent FROM folder WHERE drive = $1 AND id = $2 LIMIT 1` + +func (d *bds) GetFolder(driveID string, folderID string) (*datastore.Folder, error) { + f := new(datastore.Folder) + + row := d.DB.QueryRow(sqlSelectFolder, driveID, folderID) + err := row.Scan(&f.ID, &f.Name, &f.Trashed, &f.Parent) + + switch { + case errors.Is(err, sql.ErrNoRows): + return nil, fmt.Errorf("%v: folder not found: %w", folderID, sql.ErrNoRows) + case err != nil: + return nil, err + default: + break + } + + return f, nil +} + +const sqlSelectFolderDescendants = ` +with cte_Folders as ( + -- Root Folder + SELECT + 'folder' as [type] + , f.id + , f.drive + , f.name + , f.trashed + , f.parent + FROM folder f + WHERE f.drive = $1 AND f.id = $2 + -- Descendant folders + UNION + SELECT + 'folder' as [type] + , f.id + , f.drive + , f.name + , f.trashed + , f.parent + FROM cte_Folders cte + JOIN folder f ON 
f.drive = cte.drive AND f.parent = cte.id + WHERE cte.[type] = 'folder' +), cte_Combined as ( + -- Folders + SELECT + * + FROM cte_Folders cte + + -- Files + UNION + SELECT + 'file' as [type] + , f.id + , f.drive + , f.name + , f.trashed + , f.parent + FROM cte_Folders cte + JOIN file f ON f.drive = cte.drive AND f.parent = cte.id + WHERE cte.[type] = 'folder' +) +SELECT DISTINCT +* +FROM cte_Combined cte +` + +type folderDescendants struct { + Folders map[string]datastore.Folder + Files map[string]datastore.File +} + +func (d *bds) GetFolderDescendants(driveID string, folderID string) (*folderDescendants, error) { + descendants := &folderDescendants{ + Folders: make(map[string]datastore.Folder), + Files: make(map[string]datastore.File), + } + + if driveID == folderID { + // never return descendants when folder is a drive + return descendants, nil + } + + rows, err := d.DB.Query(sqlSelectFolderDescendants, driveID, folderID) + if errors.Is(err, sql.ErrNoRows) { + return descendants, nil + } + + if err != nil { + return nil, err + } + + defer rows.Close() + + type Row struct { + Type string + ID string + Drive string + Name string + Trashed bool + Parent string + } + + for rows.Next() { + desc := &Row{} + + err = rows.Scan(&desc.Type, &desc.ID, &desc.Drive, &desc.Name, &desc.Trashed, &desc.Parent) + if err != nil { + return nil, err + } + + switch desc.Type { + case "folder": + descendants.Folders[desc.ID] = datastore.Folder{ + ID: desc.ID, + Name: desc.Name, + Parent: desc.Parent, + Trashed: desc.Trashed, + } + case "file": + descendants.Files[desc.ID] = datastore.File{ + ID: desc.ID, + Name: desc.Name, + Parent: desc.Parent, + Trashed: desc.Trashed, + } + } + } + + return descendants, rows.Err() +} diff --git a/triggers/bernard/filters.go b/triggers/bernard/filters.go new file mode 100644 index 00000000..2525ab41 --- /dev/null +++ b/triggers/bernard/filters.go @@ -0,0 +1,59 @@ +package bernard + +import ( + "fmt" + "regexp" +) + +type filterer func(string) bool + +func newFilterer(includes []string, excludes []string) (filterer, error) { + reIncludes := make([]regexp.Regexp, 0) + reExcludes := make([]regexp.Regexp, 0) + + // compile patterns + for _, pattern := range includes { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("compiling include: %v: %w", pattern, err) + } + reIncludes = append(reIncludes, *re) + } + + for _, pattern := range excludes { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("compiling exclude: %v: %w", pattern, err) + } + reExcludes = append(reExcludes, *re) + } + + // create filterer + var fn filterer = func(string) bool { return true } + + switch { + case len(includes) > 0: + // includes + fn = func(path string) bool { + for _, re := range reIncludes { + if re.MatchString(path) { + return true + } + } + return false + } + + case len(excludes) > 0: + // excludes + fn = func(path string) bool { + for _, re := range reExcludes { + if re.MatchString(path) { + return false + } + } + return true + } + } + + return fn, nil +} diff --git a/triggers/bernard/limiter.go b/triggers/bernard/limiter.go new file mode 100644 index 00000000..183edc42 --- /dev/null +++ b/triggers/bernard/limiter.go @@ -0,0 +1,63 @@ +package bernard + +import ( + "context" + "golang.org/x/sync/semaphore" + "golang.org/x/time/rate" + "sync" + "time" +) + +const ( + // how many requests can be sent per second by all drives using the same account file + requestLimit = 8 + // how many drives can run at once (at the trigger level), e.g. 
2 triggers, with 5 drives each. + syncLimit = 5 +) + +type rateLimiter struct { + ctx context.Context + rl *rate.Limiter + sem *semaphore.Weighted +} + +func (r *rateLimiter) Wait() { + _ = r.rl.Wait(r.ctx) +} + +func (r *rateLimiter) Acquire(n int64) error { + return r.sem.Acquire(r.ctx, n) +} + +func (r *rateLimiter) Release(n int64) { + r.sem.Release(n) +} + +func newRateLimiter() *rateLimiter { + return &rateLimiter{ + ctx: context.Background(), + rl: rate.NewLimiter(rate.Every(time.Second/time.Duration(requestLimit)), requestLimit), + sem: semaphore.NewWeighted(int64(syncLimit)), + } +} + +var ( + limiters = make(map[string]*rateLimiter) + lock = &sync.Mutex{} +) + +func getRateLimiter(account string) (*rateLimiter, error) { + lock.Lock() + defer lock.Unlock() + + // return existing limiter for the account + if limiter, ok := limiters[account]; ok { + return limiter, nil + } + + // add limiter to map + limiter := newRateLimiter() + limiters[account] = limiter + + return limiter, nil +} diff --git a/triggers/bernard/paths.go b/triggers/bernard/paths.go new file mode 100644 index 00000000..54ec0951 --- /dev/null +++ b/triggers/bernard/paths.go @@ -0,0 +1,245 @@ +package bernard + +import ( + "fmt" + "github.com/m-rots/bernard" + "github.com/m-rots/bernard/datastore" + "github.com/m-rots/bernard/datastore/sqlite" + "path/filepath" +) + +type Paths struct { + AddedFiles []string + ChangedFiles []string + RemovedFiles []string +} + +func NewPathsHook(driveID string, store *bds, diff *sqlite.Difference) (bernard.Hook, *Paths) { + var paths Paths + + hook := func(drive datastore.Drive, files []datastore.File, folders []datastore.Folder, removed []string) error { + folderMaps := getDiffFolderMaps(diff) + fileMaps := getDiffFileMaps(diff) + + // get added file paths + for _, f := range diff.AddedFiles { + p, err := getFolderPath(store, driveID, f.Parent, folderMaps.Current) + if err != nil { + return fmt.Errorf("failed building file path for added file %v: %w", f.ID, err) + } + + paths.AddedFiles = append(paths.AddedFiles, filepath.Join(p, f.Name)) + } + + // get changed file paths + for _, f := range diff.ChangedFiles { + // new path + p, err := getFolderPath(store, driveID, f.New.Parent, folderMaps.Current) + if err != nil { + return fmt.Errorf("failed building file path for changed file %v: %w", f.New.ID, err) + } + + paths.ChangedFiles = append(paths.ChangedFiles, filepath.Join(p, f.New.Name)) + + // old (removed) path + if !f.Old.Trashed && f.Old.ID != "" { + p, err := getFolderPath(store, driveID, f.Old.Parent, folderMaps.Old) + if err != nil { + return fmt.Errorf("failed building removed file path for changed file %v: %w", f.Old.ID, err) + } + + paths.RemovedFiles = append(paths.RemovedFiles, filepath.Join(p, f.Old.Name)) + } + } + + // get removed file paths + for _, f := range diff.RemovedFiles { + p, err := getFolderPath(store, driveID, f.Parent, folderMaps.Old) + if err != nil { + return fmt.Errorf("failed building file path for removed file %v: %w", f.ID, err) + } + + paths.RemovedFiles = append(paths.RemovedFiles, filepath.Join(p, f.Name)) + } + + // get new and old roots for changed folders + newRoots, oldRoots := getRootChangedFolders(diff) + + // get changed file paths (descendants of newRoots) + changedNewFiles, err := getChangedFolderFiles(store, driveID, newRoots, folderMaps.Current, fileMaps.Current) + if err != nil { + return fmt.Errorf("failed building changed folder descendant files: %w", err) + } + + for _, f := range changedNewFiles { + p, err := getFolderPath(store, 
driveID, f.Parent, folderMaps.Current) + if err != nil { + return fmt.Errorf("failed building changed file path for change folder "+ + "descendant file %v: %w", f.ID, err) + } + + paths.ChangedFiles = append(paths.ChangedFiles, filepath.Join(p, f.Name)) + } + + // get descendents of changed folders (old paths - removed) + removedOldFiles, err := getChangedFolderFiles(store, driveID, oldRoots, folderMaps.Old, fileMaps.Old) + if err != nil { + return fmt.Errorf("failed building removed folder descendant files: %w", err) + } + + for _, f := range removedOldFiles { + if f.Trashed { + continue + } + + p, err := getFolderPath(store, driveID, f.Parent, folderMaps.Old) + if err != nil { + return fmt.Errorf("failed building removed file path for change folder "+ + "descendant file %v: %w", f.ID, err) + } + + paths.RemovedFiles = append(paths.RemovedFiles, filepath.Join(p, f.Name)) + } + + return nil + } + + return hook, &paths +} + +func getChangedFolderFiles(store *bds, driveID string, rootFolders []datastore.Folder, + folderMap map[string]datastore.Folder, fileMap map[string]datastore.File) ([]datastore.File, error) { + changedFiles := make([]datastore.File, 0) + + for _, folder := range rootFolders { + // get descendants + descendants, err := store.GetFolderDescendants(driveID, folder.ID) + if err != nil { + return nil, err + } + + // iterate folder descendants (populating folderMap with missing) + for foID, fo := range descendants.Folders { + if _, ok := folderMap[foID]; ok { + continue + } + + folderMap[foID] = fo + } + + // iterate descendants + for fileID, file := range descendants.Files { + // is there already a change for this file? + if _, ok := fileMap[fileID]; ok { + continue + } + + fileMap[fileID] = file + changedFiles = append(changedFiles, file) + } + } + + return changedFiles, nil +} + +func getRootChangedFolders(diff *sqlite.Difference) ([]datastore.Folder, []datastore.Folder) { + newFolders := make([]datastore.Folder, 0) + oldFolders := make([]datastore.Folder, 0) + + for _, f := range diff.ChangedFolders { + newFolders = append(newFolders, f.New) + oldFolders = append(oldFolders, f.Old) + } + + newRoots, _ := datastore.RootFolders(newFolders) + oldRoots, _ := datastore.RootFolders(oldFolders) + + return newRoots, oldRoots +} + +type diffFileMaps struct { + Current map[string]datastore.File + Old map[string]datastore.File +} + +func getDiffFileMaps(diff *sqlite.Difference) *diffFileMaps { + currentFiles := make(map[string]datastore.File) + oldFiles := make(map[string]datastore.File) + + for i, f := range diff.AddedFiles { + currentFiles[f.ID] = diff.AddedFiles[i] + } + + for i, f := range diff.ChangedFiles { + currentFiles[f.New.ID] = diff.ChangedFiles[i].New + oldFiles[f.Old.ID] = diff.ChangedFiles[i].Old + } + + return &diffFileMaps{ + Current: currentFiles, + Old: oldFiles, + } +} + +type diffFolderMaps struct { + Current map[string]datastore.Folder + Old map[string]datastore.Folder +} + +func getDiffFolderMaps(diff *sqlite.Difference) *diffFolderMaps { + currentFolders := make(map[string]datastore.Folder) + oldFolders := make(map[string]datastore.Folder) + + for i, f := range diff.AddedFolders { + currentFolders[f.ID] = diff.AddedFolders[i] + oldFolders[f.ID] = diff.AddedFolders[i] + } + + for i, f := range diff.ChangedFolders { + currentFolders[f.New.ID] = diff.ChangedFolders[i].New + oldFolders[f.Old.ID] = diff.ChangedFolders[i].Old + } + + return &diffFolderMaps{ + Current: currentFolders, + Old: oldFolders, + } +} + +func getFolderPath(store *bds, driveId string, folderId 
string, folderMap map[string]datastore.Folder) (string, error) { + path := "" + + // get top folder + topFolder, ok := folderMap[folderId] + if !ok { + f, err := store.GetFolder(driveId, folderId) + if err != nil { + return path, fmt.Errorf("could not get folder %v: %w", folderId, err) + } + + topFolder = *f + } + + // set logic variables + path = topFolder.Name + nextFolderId := topFolder.Parent + + // get folder paths + for nextFolderId != "" && nextFolderId != driveId { + f, ok := folderMap[nextFolderId] + if !ok { + df, err := store.GetFolder(driveId, nextFolderId) + if err != nil { + return path, fmt.Errorf("could not get folder %v: %w", nextFolderId, err) + } + + f = *df + folderMap[f.ID] = f + } + + path = filepath.Join(f.Name, path) + nextFolderId = f.Parent + } + + return path, nil +} diff --git a/triggers/bernard/postprocess.go b/triggers/bernard/postprocess.go new file mode 100644 index 00000000..28de4b7f --- /dev/null +++ b/triggers/bernard/postprocess.go @@ -0,0 +1,87 @@ +package bernard + +import ( + "fmt" + "github.com/m-rots/bernard" + "github.com/m-rots/bernard/datastore" + "github.com/m-rots/bernard/datastore/sqlite" +) + +func NewPostProcessBernardDiff(driveID string, store *bds, diff *sqlite.Difference) bernard.Hook { + hook := func(drive datastore.Drive, files []datastore.File, folders []datastore.Folder, removed []string) error { + // dont include removes for files already known as trashed + for i := 0; i < len(diff.RemovedFiles); i++ { + df := diff.RemovedFiles[i] + + ef, err := store.GetFile(driveID, df.ID) + if err != nil { + return fmt.Errorf("failed retrieving file (id: %v): %w", df.ID, err) + } + + switch { + case ef.Trashed && df.Trashed: + // this removed file was already known as trashed (removed to us) + diff.RemovedFiles = append(diff.RemovedFiles[:i], diff.RemovedFiles[i+1:]...) + i-- + } + } + + // dont include removes for folders already known as trashed + for i := 0; i < len(diff.RemovedFolders); i++ { + df := diff.RemovedFolders[i] + + ef, err := store.GetFolder(driveID, df.ID) + if err != nil { + return fmt.Errorf("failed retrieving folder (id: %v): %w", df.ID, err) + } + + switch { + case ef.Trashed && df.Trashed: + // this removed folder was already known as trashed (removed to us) + diff.RemovedFolders = append(diff.RemovedFolders[:i], diff.RemovedFolders[i+1:]...) + i-- + } + } + + // remove changed files that were trashed or un-trashed + for i := 0; i < len(diff.ChangedFiles); i++ { + df := diff.ChangedFiles[i].New + ef := diff.ChangedFiles[i].Old + + switch { + case ef.Trashed && !df.Trashed: + // existing state was trashed, but new state is not + diff.AddedFiles = append(diff.AddedFiles, df) + diff.ChangedFiles = append(diff.ChangedFiles[:i], diff.ChangedFiles[i+1:]...) + i-- + case !ef.Trashed && df.Trashed: + // new state is trashed, existing state is not + diff.RemovedFiles = append(diff.RemovedFiles, df) + diff.ChangedFiles = append(diff.ChangedFiles[:i], diff.ChangedFiles[i+1:]...) + i-- + } + } + + for i := 0; i < len(diff.ChangedFolders); i++ { + df := diff.ChangedFolders[i].New + ef := diff.ChangedFolders[i].Old + + switch { + case ef.Trashed && !df.Trashed: + // existing state was trashed, but new state is not + diff.AddedFolders = append(diff.AddedFolders, df) + diff.ChangedFolders = append(diff.ChangedFolders[:i], diff.ChangedFolders[i+1:]...) 
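+			// deleting index i shifts the next entry into its place,
+			// so step the loop counter back to re-examine that slot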
+		for i := 0; i < len(diff.ChangedFolders); i++ {
+			df := diff.ChangedFolders[i].New
+			ef := diff.ChangedFolders[i].Old
+
+			switch {
+			case ef.Trashed && !df.Trashed:
+				// existing state was trashed, but new state is not
+				diff.AddedFolders = append(diff.AddedFolders, df)
+				diff.ChangedFolders = append(diff.ChangedFolders[:i], diff.ChangedFolders[i+1:]...)
+				i--
+			case !ef.Trashed && df.Trashed:
+				// new state is trashed, existing state is not
+				diff.RemovedFolders = append(diff.RemovedFolders, df)
+				diff.ChangedFolders = append(diff.ChangedFolders[:i], diff.ChangedFolders[i+1:]...)
+				i--
+			}
+		}
+
+		return nil
+	}
+
+	return hook
+}
diff --git a/triggers/lidarr/lidarr.go b/triggers/lidarr/lidarr.go
index 94b345ae..228f706f 100644
--- a/triggers/lidarr/lidarr.go
+++ b/triggers/lidarr/lidarr.go
@@ -4,16 +4,17 @@ import (
 	"encoding/json"
 	"net/http"
 	"path"
+	"time"
 
 	"github.com/cloudbox/autoscan"
 	"github.com/rs/zerolog/hlog"
 )
 
 type Config struct {
-	Name      string           `yaml:"name"`
-	Priority  int              `yaml:"priority"`
-	Rewrite   autoscan.Rewrite `yaml:"rewrite"`
-	Verbosity string           `yaml:"verbosity"`
+	Name      string             `yaml:"name"`
+	Priority  int                `yaml:"priority"`
+	Rewrite   []autoscan.Rewrite `yaml:"rewrite"`
+	Verbosity string             `yaml:"verbosity"`
 }
 
 // New creates an autoscan-compatible HTTP Trigger for Lidarr webhooks.
@@ -90,6 +91,8 @@ func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 			File:     path.Base(fullPath),
 			Folder:   path.Dir(fullPath),
 			Priority: h.priority,
+			Removed:  false,
+			Time:     now(),
 		})
 	}
@@ -106,3 +109,5 @@ func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 		Int("files", len(scans)).
 		Msg("Scan moved to processor")
 }
+
+var now = time.Now
diff --git a/triggers/lidarr/lidarr_test.go b/triggers/lidarr/lidarr_test.go
index 131080ba..f9c622be 100644
--- a/triggers/lidarr/lidarr_test.go
+++ b/triggers/lidarr/lidarr_test.go
@@ -7,6 +7,7 @@ import (
 	"os"
 	"reflect"
 	"testing"
+	"time"
 
 	"github.com/cloudbox/autoscan"
 )
@@ -31,10 +32,15 @@ func TestHandler(t *testing.T) {
 	standardConfig := Config{
 		Name:     "lidarr",
 		Priority: 5,
-		Rewrite: autoscan.Rewrite{
+		Rewrite: []autoscan.Rewrite{{
 			From: "/Music/*",
 			To:   "/mnt/unionfs/Media/Music/$1",
-		},
+		}},
+	}
+
+	currentTime := time.Now()
+	now = func() time.Time {
+		return currentTime
 	}
 
 	var testCases = []Test{
@@ -51,21 +57,25 @@ func TestHandler(t *testing.T) {
 					File:     "01 - Down.mp3",
 					Folder:   "/mnt/unionfs/Media/Music/Marshmello/Joytime III (2019)",
 					Priority: 5,
+					Time:     currentTime,
 				},
 				{
 					File:     "02 - Run It Up.mp3",
 					Folder:   "/mnt/unionfs/Media/Music/Marshmello/Joytime III (2019)",
 					Priority: 5,
+					Time:     currentTime,
 				},
 				{
 					File:     "03 - Put Yo Hands Up.mp3",
 					Folder:   "/mnt/unionfs/Media/Music/Marshmello/Joytime III (2019)",
 					Priority: 5,
+					Time:     currentTime,
 				},
 				{
 					File:     "04 - Let’s Get Down.mp3",
 					Folder:   "/mnt/unionfs/Media/Music/Marshmello/Joytime III (2019)",
 					Priority: 5,
+					Time:     currentTime,
 				},
 			},
 		},
diff --git a/triggers/radarr/radarr.go b/triggers/radarr/radarr.go
index 85c824fd..c57a516e 100644
--- a/triggers/radarr/radarr.go
+++ b/triggers/radarr/radarr.go
@@ -4,16 +4,17 @@ import (
 	"encoding/json"
 	"net/http"
 	"path"
+	"time"
 
 	"github.com/cloudbox/autoscan"
 	"github.com/rs/zerolog/hlog"
 )
 
 type Config struct {
-	Name      string           `yaml:"name"`
-	Priority  int              `yaml:"priority"`
-	Rewrite   autoscan.Rewrite `yaml:"rewrite"`
-	Verbosity string           `yaml:"verbosity"`
+	Name      string             `yaml:"name"`
+	Priority  int                `yaml:"priority"`
+	Rewrite   []autoscan.Rewrite `yaml:"rewrite"`
+	Verbosity string             `yaml:"verbosity"`
 }
 
 // New creates an autoscan-compatible HTTP Trigger for Radarr webhooks.
@@ -86,6 +87,8 @@ func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 		File:     path.Base(fullPath),
 		Folder:   path.Dir(fullPath),
 		Priority: h.priority,
+		Removed:  false,
+		Time:     now(),
 	}
 
 	err = h.callback(scan)
@@ -100,3 +103,5 @@ func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 		Str("path", fullPath).
 		Msg("Scan moved to processor")
 }
+
+var now = time.Now
diff --git a/triggers/radarr/radarr_test.go b/triggers/radarr/radarr_test.go
index 5c689a2c..3dbcde3a 100644
--- a/triggers/radarr/radarr_test.go
+++ b/triggers/radarr/radarr_test.go
@@ -7,6 +7,7 @@ import (
 	"os"
 	"reflect"
 	"testing"
+	"time"
 
 	"github.com/cloudbox/autoscan"
 )
@@ -31,10 +32,15 @@ func TestHandler(t *testing.T) {
 	standardConfig := Config{
 		Name:     "radarr",
 		Priority: 5,
-		Rewrite: autoscan.Rewrite{
+		Rewrite: []autoscan.Rewrite{{
 			From: "/Movies/*",
 			To:   "/mnt/unionfs/Media/Movies/$1",
-		},
+		}},
+	}
+
+	currentTime := time.Now()
+	now = func() time.Time {
+		return currentTime
 	}
 
 	var testCases = []Test{
@@ -51,6 +57,7 @@ func TestHandler(t *testing.T) {
 					File:     "Interstellar.2014.UHD.BluRay.2160p.REMUX.mkv",
 					Folder:   "/mnt/unionfs/Media/Movies/Interstellar (2014)",
 					Priority: 5,
+					Time:     currentTime,
 				},
 			},
 		},
@@ -61,10 +68,10 @@ func TestHandler(t *testing.T) {
 			Config: Config{
 				Name:     "radarr",
 				Priority: 3,
-				Rewrite: autoscan.Rewrite{
+				Rewrite: []autoscan.Rewrite{{
 					From: "/data/*",
 					To:   "/Media/$1",
-				},
+				}},
 			},
 			Fixture: "testdata/parasite.json",
 		},
@@ -75,6 +82,7 @@ func TestHandler(t *testing.T) {
 					File:     "Parasite.2019.2160p.UHD.BluRay.REMUX.HEVC.TrueHD.Atmos.7.1.mkv",
 					Folder:   "/Media/Movies/Parasite (2019)",
 					Priority: 3,
+					Time:     currentTime,
 				},
 			},
 		},
diff --git a/triggers/sonarr/sonarr.go b/triggers/sonarr/sonarr.go
index eaafebd8..43898da3 100644
--- a/triggers/sonarr/sonarr.go
+++ b/triggers/sonarr/sonarr.go
@@ -4,16 +4,17 @@ import (
 	"encoding/json"
 	"net/http"
 	"path"
+	"time"
 
 	"github.com/cloudbox/autoscan"
 	"github.com/rs/zerolog/hlog"
 )
 
 type Config struct {
-	Name      string           `yaml:"name"`
-	Priority  int              `yaml:"priority"`
-	Rewrite   autoscan.Rewrite `yaml:"rewrite"`
-	Verbosity string           `yaml:"verbosity"`
+	Name      string             `yaml:"name"`
+	Priority  int                `yaml:"priority"`
+	Rewrite   []autoscan.Rewrite `yaml:"rewrite"`
+	Verbosity string             `yaml:"verbosity"`
 }
 
 // New creates an autoscan-compatible HTTP Trigger for Sonarr webhooks.
@@ -86,6 +87,8 @@ func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 		File:     path.Base(fullPath),
 		Folder:   path.Dir(fullPath),
 		Priority: h.priority,
+		Removed:  false,
+		Time:     now(),
 	}
 
 	err = h.callback(scan)
@@ -100,3 +103,5 @@ func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 		Str("path", fullPath).
Msg("Scan moved to processor") } + +var now = time.Now diff --git a/triggers/sonarr/sonarr_test.go b/triggers/sonarr/sonarr_test.go index 5ea861f9..1f407ee3 100644 --- a/triggers/sonarr/sonarr_test.go +++ b/triggers/sonarr/sonarr_test.go @@ -7,6 +7,7 @@ import ( "os" "reflect" "testing" + "time" "github.com/cloudbox/autoscan" ) @@ -31,10 +32,15 @@ func TestHandler(t *testing.T) { standardConfig := Config{ Name: "sonarr", Priority: 5, - Rewrite: autoscan.Rewrite{ + Rewrite: []autoscan.Rewrite{{ From: "/TV/*", To: "/mnt/unionfs/Media/TV/$1", - }, + }}, + } + + currentTime := time.Now() + now = func() time.Time { + return currentTime } var testCases = []Test{ @@ -51,6 +57,7 @@ func TestHandler(t *testing.T) { File: "Westworld.S01E01.The.Original.2160p.TrueHD.Atmos.7.1.HEVC.REMUX.mkv", Folder: "/mnt/unionfs/Media/TV/Westworld/Season 1", Priority: 5, + Time: currentTime, }, }, },