Skip to content

Commit

Permalink
cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
refik committed Jun 17, 2014
1 parent d1b9dd7 commit d501689
Showing 1 changed file with 102 additions and 102 deletions.
204 changes: 102 additions & 102 deletions main.go
Original file line number Diff line number Diff line change
@@ -1,28 +1,28 @@
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"os"
	"path"
	"strconv"
	"sync"
	"syscall"
	"time"
)

// Settings supplied on the command line. RemoteFolderId is resolved from
// RemoteFolderName at startup (see SaveRemoteFolderId).
var RemoteFolderId int
var RemoteFolderName = flag.String("putio-folder", "", "putio folder name under your root")
var AccessToken = flag.String("oauth-token", "", "Oauth Token")
var LocalFolderPath = flag.String("local-path", "", "local folder to fetch")

// ApiUrl is the base address of the put.io v2 API.
const ApiUrl = "https://api.put.io/v2/"

// DownloadExtension marks an in-progress download; the temp file is renamed
// to its final name once all chunks are written.
const DownloadExtension = ".putiodl"

// MaxConnection is the number of parallel range requests per file.
const MaxConnection = 10
Expand Down Expand Up @@ -80,7 +80,7 @@ func FilesListRequest(parentId int) []File {
params := map[string]string{"parent_id": strconv.Itoa(parentId)}
folderUrl := MakeUrl("files/list", params)

log.Println(folderUrl)
log.Println(folderUrl)
resp, err := http.Get(folderUrl)
if err != nil {
log.Fatal(err)
Expand All @@ -99,123 +99,123 @@ func FilesListRequest(parentId int) []File {
}

// WalkAndDownload recursively mirrors the remote folder tree rooted at
// parentId under folderPath, downloading any file that does not already
// exist locally. Subdirectories are walked in their own goroutines.
func WalkAndDownload(parentId int, folderPath string) {
	// Create the enclosing folder if it is absent.
	if _, err := os.Stat(folderPath); err != nil {
		if err := os.Mkdir(folderPath, 0755); err != nil {
			log.Fatal(err)
		}
	}

	files := FilesListRequest(parentId)
	log.Println("Walking in", folderPath)

	for _, file := range files {
		path := path.Join(folderPath, file.Name)
		if file.ContentType == "application/x-directory" {
			// NOTE(review): nothing waits on this goroutine; work in
			// subdirectories may be cut short if the process exits.
			go WalkAndDownload(file.Id, path)
		} else {
			// Only download files that are not present locally.
			if _, err := os.Stat(path); err != nil {
				log.Println(err)
				DownloadFile(&file, path)
			}
		}
	}
}

func DownloadChunk(file *File, fp *os.File, offset int,
size int, chunkWg *sync.WaitGroup) {
defer chunkWg.Done()
log.Println("Downloading chunk starting from:", offset, "bytes:", size)

req, err := http.NewRequest("GET", file.DownloadUrl(), nil)
if err != nil {
log.Fatal(err)
}

rangeHeader := fmt.Sprintf("bytes=%d-%d\r\n", offset, offset + size)
log.Println(rangeHeader)
req.Header.Add("Range", rangeHeader)

resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()

buffer := make([]byte, 32*1024)
for {
nr, er := resp.Body.Read(buffer)
if nr > 0 {
nw, ew := fp.WriteAt(buffer[0:nr], int64(offset))
offset += nw
if ew != nil {
log.Fatal(ew)
}
}
if er == io.EOF {
log.Println("Seen EOF")
break
}
if er != nil {
log.Println(er)
break
}
}
size int, chunkWg *sync.WaitGroup) {
defer chunkWg.Done()
log.Println("Downloading chunk starting from:", offset, "bytes:", size)

req, err := http.NewRequest("GET", file.DownloadUrl(), nil)
if err != nil {
log.Fatal(err)
}

rangeHeader := fmt.Sprintf("bytes=%d-%d\r\n", offset, offset+size)
log.Println(rangeHeader)
req.Header.Add("Range", rangeHeader)

resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()

buffer := make([]byte, 32*1024)
for {
nr, er := resp.Body.Read(buffer)
if nr > 0 {
nw, ew := fp.WriteAt(buffer[0:nr], int64(offset))
offset += nw
if ew != nil {
log.Fatal(ew)
}
}
if er == io.EOF {
log.Println("Seen EOF")
break
}
if er != nil {
log.Println(er)
break
}
}
}

// DownloadFile downloads the remote file to path using MaxConnection
// parallel range requests. Data is written to a temporary
// "<path>.putiodl" file, which is renamed to path once every chunk has
// finished.
func DownloadFile(file *File, path string) {
	// Wait for every chunk goroutine before renaming the temp file.
	var chunkWg sync.WaitGroup

	fp, err := os.Create(path + DownloadExtension)
	if err != nil {
		log.Fatal(err)
	}
	defer fp.Close()

	// Pre-allocate the full size so chunks can WriteAt concurrently.
	// NOTE(review): the Fallocate error is deliberately ignored here;
	// the WriteAt calls below still extend the file if it fails.
	syscall.Fallocate(int(fp.Fd()), 0, 0, int64(file.Size))

	chunkSize := file.Size / MaxConnection
	excessBytes := file.Size % MaxConnection

	log.Println("Chunk size:", chunkSize, "Excess:", excessBytes)

	offset := 0
	for i := MaxConnection; i > 0; i-- {
		if i == 1 {
			// The last connection also takes the remainder bytes.
			chunkSize += excessBytes
		}
		chunkWg.Add(1)
		go DownloadChunk(file, fp, offset, chunkSize, &chunkWg)
		offset += chunkSize
	}
	chunkWg.Wait()

	fp.Close()
	er := os.Rename(path+DownloadExtension, path)
	if er != nil {
		// BUG FIX: the original logged `err` (the long-stale, nil result
		// of os.Create) instead of `er`, hiding the rename failure.
		log.Fatal(er)
	}

	log.Println("Download completed")
}

// init parses the command line flags and resolves the remote folder id
// before main starts.
func init() {
	flag.Parse()
	SaveRemoteFolderId()
}

// main re-syncs the remote folder into the local path every ten minutes,
// forever.
func main() {
	log.Println("Starting...")

	for {
		WalkAndDownload(RemoteFolderId, *LocalFolderPath)
		time.Sleep(10 * time.Minute)
	}
	// The original had an unreachable log.Println("Exiting...") after the
	// infinite loop; it has been removed (go vet flags unreachable code).
}

0 comments on commit d501689

Please sign in to comment.