| author | Petter Rasmussen | 2016-02-21 21:03:26 +0100 |
|---|---|---|
| committer | Petter Rasmussen | 2016-02-21 21:03:26 +0100 |
| commit | 1973512dd8edca24df4124fb3dfac4a432a0d481 (patch) | |
| tree | c61daefa5cf24eb2211ac816862697f9e0676d86 /drive | |
| parent | 701c7f1991ae765a51b0b7404d1edbb2dc523055 (diff) | |
| download | gdrive-1973512dd8edca24df4124fb3dfac4a432a0d481.tar.bz2 | |
go fmt
Diffstat (limited to 'drive')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | drive/about.go | 76 |
| -rw-r--r-- | drive/changes.go | 158 |
| -rw-r--r-- | drive/delete.go | 46 |
| -rw-r--r-- | drive/download.go | 416 |
| -rw-r--r-- | drive/drive.go | 16 |
| -rw-r--r-- | drive/errors.go | 18 |
| -rw-r--r-- | drive/export.go | 172 |
| -rw-r--r-- | drive/import.go | 80 |
| -rw-r--r-- | drive/info.go | 94 |
| -rw-r--r-- | drive/list.go | 206 |
| -rw-r--r-- | drive/mkdir.go | 42 |
| -rw-r--r-- | drive/path.go | 80 |
| -rw-r--r-- | drive/progress.go | 148 |
| -rw-r--r-- | drive/revision_delete.go | 36 |
| -rw-r--r-- | drive/revision_download.go | 98 |
| -rw-r--r-- | drive/revision_list.go | 92 |
| -rw-r--r-- | drive/share.go | 152 |
| -rw-r--r-- | drive/sync.go | 836 |
| -rw-r--r-- | drive/sync_download.go | 558 |
| -rw-r--r-- | drive/sync_list.go | 150 |
| -rw-r--r-- | drive/sync_upload.go | 801 |
| -rw-r--r-- | drive/timeout_reader.go | 114 |
| -rw-r--r-- | drive/update.go | 106 |
| -rw-r--r-- | drive/upload.go | 402 |
| -rw-r--r-- | drive/util.go | 200 |
25 files changed, 2554 insertions, 2543 deletions
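The diff that follows is purely mechanical: it is the result of running Go's standard formatter (gofmt, which the `go fmt` command wraps) over the `drive` package. Every hunk re-indents code with tabs instead of spaces, sorts the specs inside each import block alphabetically, aligns struct fields and literal values into columns, and tightens the spacing around binary operators nested inside larger expressions (for example `quota.Limit-quota.Usage` inside an argument list, or `1024*1024` inside a comparison). As a rough, hypothetical illustration (none of the identifiers below come from gdrive), a small file in gofmt's output style looks like this:

```go
// Hypothetical example, not taken from the gdrive sources: a minimal file
// laid out the way gofmt leaves it.
package main

import (
	// Import specs are sorted alphabetically within the block.
	"fmt"
	"os"
	"text/tabwriter"
)

// Struct fields are indented with a single tab and aligned into columns.
type storageQuota struct {
	Usage int64
	Limit int64
}

func main() {
	q := storageQuota{Usage: 5, Limit: 15}

	// Function bodies are indented with tabs, never spaces.
	w := new(tabwriter.Writer)
	w.Init(os.Stdout, 0, 0, 3, ' ', 0)

	// Spacing around binary operators reflects nesting: inside the
	// argument list below gofmt writes q.Limit-q.Usage without spaces.
	fmt.Fprintf(w, "Used\tFree\n%d\t%d\n", q.Usage, q.Limit-q.Usage)
	w.Flush()
}
```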
diff --git a/drive/about.go b/drive/about.go index 4c23ab8..c2f1643 100644 --- a/drive/about.go +++ b/drive/about.go @@ -1,68 +1,68 @@ package drive import ( - "io" - "fmt" - "text/tabwriter" + "fmt" + "io" + "text/tabwriter" ) type AboutArgs struct { - Out io.Writer - SizeInBytes bool + Out io.Writer + SizeInBytes bool } func (self *Drive) About(args AboutArgs) (err error) { - about, err := self.service.About.Get().Fields("maxImportSizes", "maxUploadSize", "storageQuota", "user").Do() - if err != nil { - return fmt.Errorf("Failed to get about: %s", err) - } + about, err := self.service.About.Get().Fields("maxImportSizes", "maxUploadSize", "storageQuota", "user").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } - user := about.User - quota := about.StorageQuota + user := about.User + quota := about.StorageQuota - fmt.Fprintf(args.Out, "User: %s, %s\n", user.DisplayName, user.EmailAddress) - fmt.Fprintf(args.Out, "Used: %s\n", formatSize(quota.Usage, args.SizeInBytes)) - fmt.Fprintf(args.Out, "Free: %s\n", formatSize(quota.Limit - quota.Usage, args.SizeInBytes)) - fmt.Fprintf(args.Out, "Total: %s\n", formatSize(quota.Limit, args.SizeInBytes)) - fmt.Fprintf(args.Out, "Max upload size: %s\n", formatSize(about.MaxUploadSize, args.SizeInBytes)) - return + fmt.Fprintf(args.Out, "User: %s, %s\n", user.DisplayName, user.EmailAddress) + fmt.Fprintf(args.Out, "Used: %s\n", formatSize(quota.Usage, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Free: %s\n", formatSize(quota.Limit-quota.Usage, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Total: %s\n", formatSize(quota.Limit, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Max upload size: %s\n", formatSize(about.MaxUploadSize, args.SizeInBytes)) + return } type AboutImportArgs struct { - Out io.Writer + Out io.Writer } func (self *Drive) AboutImport(args AboutImportArgs) (err error) { - about, err := self.service.About.Get().Fields("importFormats").Do() - if err != nil { - return fmt.Errorf("Failed to get about: %s", err) - } - printAboutFormats(args.Out, about.ImportFormats) - return + about, err := self.service.About.Get().Fields("importFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + printAboutFormats(args.Out, about.ImportFormats) + return } type AboutExportArgs struct { - Out io.Writer + Out io.Writer } func (self *Drive) AboutExport(args AboutExportArgs) (err error) { - about, err := self.service.About.Get().Fields("exportFormats").Do() - if err != nil { - return fmt.Errorf("Failed to get about: %s", err) - } - printAboutFormats(args.Out, about.ExportFormats) - return + about, err := self.service.About.Get().Fields("exportFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + printAboutFormats(args.Out, about.ExportFormats) + return } func printAboutFormats(out io.Writer, formats map[string][]string) { - w := new(tabwriter.Writer) - w.Init(out, 0, 0, 3, ' ', 0) + w := new(tabwriter.Writer) + w.Init(out, 0, 0, 3, ' ', 0) - fmt.Fprintln(w, "From\tTo") + fmt.Fprintln(w, "From\tTo") - for from, toFormats := range formats { - fmt.Fprintf(w, "%s\t%s\n", from, formatList(toFormats)) - } + for from, toFormats := range formats { + fmt.Fprintf(w, "%s\t%s\n", from, formatList(toFormats)) + } - w.Flush() + w.Flush() } diff --git a/drive/changes.go b/drive/changes.go index 1d9a89d..ffd7824 100644 --- a/drive/changes.go +++ b/drive/changes.go @@ -1,103 +1,103 @@ package drive import ( - "fmt" - "io" - "text/tabwriter" - "google.golang.org/api/drive/v3" + "fmt" + 
"google.golang.org/api/drive/v3" + "io" + "text/tabwriter" ) type ListChangesArgs struct { - Out io.Writer - PageToken string - MaxChanges int64 - Now bool - NameWidth int64 - SkipHeader bool + Out io.Writer + PageToken string + MaxChanges int64 + Now bool + NameWidth int64 + SkipHeader bool } func (self *Drive) ListChanges(args ListChangesArgs) error { - if args.Now { - pageToken, err := self.GetChangesStartPageToken() - if err != nil { - return err - } - - fmt.Fprintf(args.Out, "Page token: %s\n", pageToken) - return nil - } - - changeList, err := self.service.Changes.List(args.PageToken).PageSize(args.MaxChanges).RestrictToMyDrive(true).Fields("newStartPageToken", "nextPageToken", "changes(fileId,removed,time,file(id,name,md5Checksum,mimeType,createdTime,modifiedTime))").Do() - if err != nil { - return fmt.Errorf("Failed listing changes: %s", err) - } - - PrintChanges(PrintChangesArgs{ - Out: args.Out, - ChangeList: changeList, - NameWidth: int(args.NameWidth), - SkipHeader: args.SkipHeader, - }) - - return nil + if args.Now { + pageToken, err := self.GetChangesStartPageToken() + if err != nil { + return err + } + + fmt.Fprintf(args.Out, "Page token: %s\n", pageToken) + return nil + } + + changeList, err := self.service.Changes.List(args.PageToken).PageSize(args.MaxChanges).RestrictToMyDrive(true).Fields("newStartPageToken", "nextPageToken", "changes(fileId,removed,time,file(id,name,md5Checksum,mimeType,createdTime,modifiedTime))").Do() + if err != nil { + return fmt.Errorf("Failed listing changes: %s", err) + } + + PrintChanges(PrintChangesArgs{ + Out: args.Out, + ChangeList: changeList, + NameWidth: int(args.NameWidth), + SkipHeader: args.SkipHeader, + }) + + return nil } func (self *Drive) GetChangesStartPageToken() (string, error) { - res, err := self.service.Changes.GetStartPageToken().Do() - if err != nil { - return "", fmt.Errorf("Failed getting start page token: %s", err) - } + res, err := self.service.Changes.GetStartPageToken().Do() + if err != nil { + return "", fmt.Errorf("Failed getting start page token: %s", err) + } - return res.StartPageToken, nil + return res.StartPageToken, nil } type PrintChangesArgs struct { - Out io.Writer - ChangeList *drive.ChangeList - NameWidth int - SkipHeader bool + Out io.Writer + ChangeList *drive.ChangeList + NameWidth int + SkipHeader bool } func PrintChanges(args PrintChangesArgs) { - w := new(tabwriter.Writer) - w.Init(args.Out, 0, 0, 3, ' ', 0) - - if !args.SkipHeader { - fmt.Fprintln(w, "Id\tName\tAction\tTime") - } - - for _, c := range args.ChangeList.Changes { - var name string - var action string - - if c.Removed { - action = "remove" - } else { - name = c.File.Name - action = "update" - } - - fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", - c.FileId, - truncateString(name, args.NameWidth), - action, - formatDatetime(c.Time), - ) - } - - if len(args.ChangeList.Changes) > 0 { - w.Flush() - pageToken, hasMore := nextChangesPageToken(args.ChangeList) - fmt.Fprintf(args.Out, "\nToken: %s, more: %t\n", pageToken, hasMore) - } else { - fmt.Fprintln(args.Out, "No changes") - } + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tAction\tTime") + } + + for _, c := range args.ChangeList.Changes { + var name string + var action string + + if c.Removed { + action = "remove" + } else { + name = c.File.Name + action = "update" + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", + c.FileId, + truncateString(name, args.NameWidth), + action, + formatDatetime(c.Time), + ) + } + + if 
len(args.ChangeList.Changes) > 0 { + w.Flush() + pageToken, hasMore := nextChangesPageToken(args.ChangeList) + fmt.Fprintf(args.Out, "\nToken: %s, more: %t\n", pageToken, hasMore) + } else { + fmt.Fprintln(args.Out, "No changes") + } } func nextChangesPageToken(cl *drive.ChangeList) (string, bool) { - if cl.NextPageToken != "" { - return cl.NextPageToken, true - } + if cl.NextPageToken != "" { + return cl.NextPageToken, true + } - return cl.NewStartPageToken, false + return cl.NewStartPageToken, false } diff --git a/drive/delete.go b/drive/delete.go index bacd4a3..314672c 100644 --- a/drive/delete.go +++ b/drive/delete.go @@ -1,39 +1,39 @@ package drive import ( - "io" - "fmt" + "fmt" + "io" ) type DeleteArgs struct { - Out io.Writer - Id string - Recursive bool + Out io.Writer + Id string + Recursive bool } func (self *Drive) Delete(args DeleteArgs) error { - f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } + f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } - if isDir(f) && !args.Recursive { - return fmt.Errorf("'%s' is a directory, use the 'recursive' flag to delete directories", f.Name) - } + if isDir(f) && !args.Recursive { + return fmt.Errorf("'%s' is a directory, use the 'recursive' flag to delete directories", f.Name) + } - err = self.service.Files.Delete(args.Id).Do() - if err != nil { - return fmt.Errorf("Failed to delete file: %s", err) - } + err = self.service.Files.Delete(args.Id).Do() + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } - fmt.Fprintf(args.Out, "Deleted '%s'\n", f.Name) - return nil + fmt.Fprintf(args.Out, "Deleted '%s'\n", f.Name) + return nil } func (self *Drive) deleteFile(fileId string) error { - err := self.service.Files.Delete(fileId).Do() - if err != nil { - return fmt.Errorf("Failed to delete file: %s", err) - } - return nil + err := self.service.Files.Delete(fileId).Do() + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } + return nil } diff --git a/drive/download.go b/drive/download.go index 1779d57..15495df 100644 --- a/drive/download.go +++ b/drive/download.go @@ -1,245 +1,245 @@ package drive import ( - "fmt" - "io" - "os" - "time" - "path/filepath" - "google.golang.org/api/drive/v3" - "google.golang.org/api/googleapi" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "os" + "path/filepath" + "time" ) type DownloadArgs struct { - Out io.Writer - Progress io.Writer - Id string - Path string - Force bool - Recursive bool - Delete bool - Stdout bool + Out io.Writer + Progress io.Writer + Id string + Path string + Force bool + Recursive bool + Delete bool + Stdout bool } func (self *Drive) Download(args DownloadArgs) error { - if args.Recursive { - return self.downloadRecursive(args) - } - - f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } - - if isDir(f) { - return fmt.Errorf("'%s' is a directory, use --recursive to download directories", f.Name) - } - - if !isBinary(f) { - return fmt.Errorf("'%s' is a google document and must be exported, see the export command", f.Name) - } - - bytes, rate, err := self.downloadBinary(f, args) - - if !args.Stdout { - fmt.Fprintf(args.Out, "Downloaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), 
formatSize(bytes, false)) - } - - if args.Delete { - err = self.deleteFile(args.Id) - if err != nil { - return fmt.Errorf("Failed to delete file: %s", err) - } - - if !args.Stdout { - fmt.Fprintf(args.Out, "Removed %s\n", args.Id) - } - } - return err + if args.Recursive { + return self.downloadRecursive(args) + } + + f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } + + if isDir(f) { + return fmt.Errorf("'%s' is a directory, use --recursive to download directories", f.Name) + } + + if !isBinary(f) { + return fmt.Errorf("'%s' is a google document and must be exported, see the export command", f.Name) + } + + bytes, rate, err := self.downloadBinary(f, args) + + if !args.Stdout { + fmt.Fprintf(args.Out, "Downloaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(bytes, false)) + } + + if args.Delete { + err = self.deleteFile(args.Id) + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } + + if !args.Stdout { + fmt.Fprintf(args.Out, "Removed %s\n", args.Id) + } + } + return err } type DownloadQueryArgs struct { - Out io.Writer - Progress io.Writer - Query string - Path string - Force bool - Recursive bool + Out io.Writer + Progress io.Writer + Query string + Path string + Force bool + Recursive bool } func (self *Drive) DownloadQuery(args DownloadQueryArgs) error { - listArgs := listAllFilesArgs{ - query: args.Query, - fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,size,md5Checksum)"}, - } - files, err := self.listAllFiles(listArgs) - if err != nil { - return fmt.Errorf("Failed to list files: %s", err) - } - - downloadArgs := DownloadArgs{ - Out: args.Out, - Progress: args.Progress, - Path: args.Path, - Force: args.Force, - } - - for _, f := range files { - if isDir(f) && args.Recursive { - err = self.downloadDirectory(f, downloadArgs) - } else if isBinary(f) { - _, _, err = self.downloadBinary(f, downloadArgs) - } - - if err != nil { - return err - } - } - - return nil + listArgs := listAllFilesArgs{ + query: args.Query, + fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,size,md5Checksum)"}, + } + files, err := self.listAllFiles(listArgs) + if err != nil { + return fmt.Errorf("Failed to list files: %s", err) + } + + downloadArgs := DownloadArgs{ + Out: args.Out, + Progress: args.Progress, + Path: args.Path, + Force: args.Force, + } + + for _, f := range files { + if isDir(f) && args.Recursive { + err = self.downloadDirectory(f, downloadArgs) + } else if isBinary(f) { + _, _, err = self.downloadBinary(f, downloadArgs) + } + + if err != nil { + return err + } + } + + return nil } func (self *Drive) downloadRecursive(args DownloadArgs) error { - f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } - - if isDir(f) { - return self.downloadDirectory(f, args) - } else if isBinary(f) { - _, _, err = self.downloadBinary(f, args) - return err - } - - return nil + f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } + + if isDir(f) { + return self.downloadDirectory(f, args) + } else if isBinary(f) { + _, _, err = self.downloadBinary(f, args) + return err + } + + return nil } func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) (int64, int64, 
error) { - // Get timeout reader wrapper and context - timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() - - res, err := self.service.Files.Get(f.Id).Context(ctx).Download() - if err != nil { - return 0, 0, fmt.Errorf("Failed to download file: %s", err) - } - - // Close body on function exit - defer res.Body.Close() - - // Path to file - fpath := filepath.Join(args.Path, f.Name) - - if !args.Stdout { - fmt.Fprintf(args.Out, "Downloading %s -> %s\n", f.Name, fpath) - } - - return self.saveFile(saveFileArgs{ - out: args.Out, - body: timeoutReaderWrapper(res.Body), - contentLength: res.ContentLength, - fpath: fpath, - force: args.Force, - stdout: args.Stdout, - progress: args.Progress, - }) + // Get timeout reader wrapper and context + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + + res, err := self.service.Files.Get(f.Id).Context(ctx).Download() + if err != nil { + return 0, 0, fmt.Errorf("Failed to download file: %s", err) + } + + // Close body on function exit + defer res.Body.Close() + + // Path to file + fpath := filepath.Join(args.Path, f.Name) + + if !args.Stdout { + fmt.Fprintf(args.Out, "Downloading %s -> %s\n", f.Name, fpath) + } + + return self.saveFile(saveFileArgs{ + out: args.Out, + body: timeoutReaderWrapper(res.Body), + contentLength: res.ContentLength, + fpath: fpath, + force: args.Force, + stdout: args.Stdout, + progress: args.Progress, + }) } type saveFileArgs struct { - out io.Writer - body io.Reader - contentLength int64 - fpath string - force bool - stdout bool - progress io.Writer + out io.Writer + body io.Reader + contentLength int64 + fpath string + force bool + stdout bool + progress io.Writer } func (self *Drive) saveFile(args saveFileArgs) (int64, int64, error) { - // Wrap response body in progress reader - srcReader := getProgressReader(args.body, args.progress, args.contentLength) - - if args.stdout { - // Write file content to stdout - _, err := io.Copy(args.out, srcReader) - return 0, 0, err - } - - // Check if file exists - if !args.force && fileExists(args.fpath) { - return 0, 0, fmt.Errorf("File '%s' already exists, use --force to overwrite", args.fpath) - } - - // Ensure any parent directories exists - if err := mkdir(args.fpath); err != nil { - return 0, 0, err - } - - // Download to tmp file - tmpPath := args.fpath + ".incomplete" - - // Create new file - outFile, err := os.Create(tmpPath) - if err != nil { - return 0, 0, fmt.Errorf("Unable to create new file: %s", err) - } - - started := time.Now() - - // Save file to disk - bytes, err := io.Copy(outFile, srcReader) - if err != nil { - outFile.Close() - os.Remove(tmpPath) - return 0, 0, fmt.Errorf("Failed saving file: %s", err) - } - - // Calculate average download rate - rate := calcRate(bytes, started, time.Now()) - - //if deleteSourceFile { - // self.Delete(args.Id) - //} - - // Close File - outFile.Close() - - // Rename tmp file to proper filename - return bytes, rate, os.Rename(tmpPath, args.fpath) + // Wrap response body in progress reader + srcReader := getProgressReader(args.body, args.progress, args.contentLength) + + if args.stdout { + // Write file content to stdout + _, err := io.Copy(args.out, srcReader) + return 0, 0, err + } + + // Check if file exists + if !args.force && fileExists(args.fpath) { + return 0, 0, fmt.Errorf("File '%s' already exists, use --force to overwrite", args.fpath) + } + + // Ensure any parent directories exists + if err := mkdir(args.fpath); err != nil { + return 0, 0, err + } + + // Download to tmp file + tmpPath := args.fpath + 
".incomplete" + + // Create new file + outFile, err := os.Create(tmpPath) + if err != nil { + return 0, 0, fmt.Errorf("Unable to create new file: %s", err) + } + + started := time.Now() + + // Save file to disk + bytes, err := io.Copy(outFile, srcReader) + if err != nil { + outFile.Close() + os.Remove(tmpPath) + return 0, 0, fmt.Errorf("Failed saving file: %s", err) + } + + // Calculate average download rate + rate := calcRate(bytes, started, time.Now()) + + //if deleteSourceFile { + // self.Delete(args.Id) + //} + + // Close File + outFile.Close() + + // Rename tmp file to proper filename + return bytes, rate, os.Rename(tmpPath, args.fpath) } func (self *Drive) downloadDirectory(parent *drive.File, args DownloadArgs) error { - listArgs := listAllFilesArgs{ - query: fmt.Sprintf("'%s' in parents", parent.Id), - fields: []googleapi.Field{"nextPageToken", "files(id,name)"}, - } - files, err := self.listAllFiles(listArgs) - if err != nil { - return fmt.Errorf("Failed listing files: %s", err) - } - - newPath := filepath.Join(args.Path, parent.Name) - - for _, f := range files { - // Copy args and update changed fields - newArgs := args - newArgs.Path = newPath - newArgs.Id = f.Id - newArgs.Stdout = false - - err = self.downloadRecursive(newArgs) - if err != nil { - return err - } - } - - return nil + listArgs := listAllFilesArgs{ + query: fmt.Sprintf("'%s' in parents", parent.Id), + fields: []googleapi.Field{"nextPageToken", "files(id,name)"}, + } + files, err := self.listAllFiles(listArgs) + if err != nil { + return fmt.Errorf("Failed listing files: %s", err) + } + + newPath := filepath.Join(args.Path, parent.Name) + + for _, f := range files { + // Copy args and update changed fields + newArgs := args + newArgs.Path = newPath + newArgs.Id = f.Id + newArgs.Stdout = false + + err = self.downloadRecursive(newArgs) + if err != nil { + return err + } + } + + return nil } func isDir(f *drive.File) bool { - return f.MimeType == DirectoryMimeType + return f.MimeType == DirectoryMimeType } func isBinary(f *drive.File) bool { - return f.Md5Checksum != "" + return f.Md5Checksum != "" } diff --git a/drive/drive.go b/drive/drive.go index d908beb..696f5d5 100644 --- a/drive/drive.go +++ b/drive/drive.go @@ -1,19 +1,19 @@ package drive import ( - "net/http" - "google.golang.org/api/drive/v3" + "google.golang.org/api/drive/v3" + "net/http" ) type Drive struct { - service *drive.Service + service *drive.Service } func New(client *http.Client) (*Drive, error) { - service, err := drive.New(client) - if err != nil { - return nil, err - } + service, err := drive.New(client) + if err != nil { + return nil, err + } - return &Drive{service}, nil + return &Drive{service}, nil } diff --git a/drive/errors.go b/drive/errors.go index 703dae5..e7631f7 100644 --- a/drive/errors.go +++ b/drive/errors.go @@ -1,22 +1,22 @@ package drive import ( - "google.golang.org/api/googleapi" - "time" + "google.golang.org/api/googleapi" + "time" ) const MaxBackendErrorRetries = 5 func isBackendError(err error) bool { - if err == nil { - return false - } + if err == nil { + return false + } - ae, ok := err.(*googleapi.Error) - return ok && ae.Code >= 500 && ae.Code <= 599 + ae, ok := err.(*googleapi.Error) + return ok && ae.Code >= 500 && ae.Code <= 599 } func exponentialBackoffSleep(try int) { - seconds := pow(2, try) - time.Sleep(time.Duration(seconds) * time.Second) + seconds := pow(2, try) + time.Sleep(time.Duration(seconds) * time.Second) } diff --git a/drive/export.go b/drive/export.go index c90bc10..3fdd45a 100644 --- 
a/drive/export.go +++ b/drive/export.go @@ -1,111 +1,111 @@ package drive import ( - "io" - "os" - "fmt" - "mime" + "fmt" + "io" + "mime" + "os" ) var DefaultExportMime = map[string]string{ - "application/vnd.google-apps.form": "application/zip", - "application/vnd.google-apps.document": "application/pdf", - "application/vnd.google-apps.drawing": "image/svg+xml", - "application/vnd.google-apps.spreadsheet": "text/csv", - "application/vnd.google-apps.script": "application/vnd.google-apps.script+json", - "application/vnd.google-apps.presentation": "application/pdf", + "application/vnd.google-apps.form": "application/zip", + "application/vnd.google-apps.document": "application/pdf", + "application/vnd.google-apps.drawing": "image/svg+xml", + "application/vnd.google-apps.spreadsheet": "text/csv", + "application/vnd.google-apps.script": "application/vnd.google-apps.script+json", + "application/vnd.google-apps.presentation": "application/pdf", } type ExportArgs struct { - Out io.Writer - Id string - PrintMimes bool - Mime string - Force bool + Out io.Writer + Id string + PrintMimes bool + Mime string + Force bool } func (self *Drive) Export(args ExportArgs) error { - f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } - - if args.PrintMimes { - return self.printMimes(args.Out, f.MimeType) - } - - exportMime, err := getExportMime(args.Mime, f.MimeType) - if err != nil { - return err - } - - filename := getExportFilename(f.Name, exportMime) - - res, err := self.service.Files.Export(args.Id, exportMime).Download() - if err != nil { - return fmt.Errorf("Failed to download file: %s", err) - } - - // Close body on function exit - defer res.Body.Close() - - // Check if file exists - if !args.Force && fileExists(filename) { - return fmt.Errorf("File '%s' already exists, use --force to overwrite", filename) - } - - // Create new file - outFile, err := os.Create(filename) - if err != nil { - return fmt.Errorf("Unable to create new file '%s': %s", filename, err) - } - - // Close file on function exit - defer outFile.Close() - - // Save file to disk - _, err = io.Copy(outFile, res.Body) - if err != nil { - return fmt.Errorf("Failed saving file: %s", err) - } - - fmt.Fprintf(args.Out, "Exported '%s' with mime type: '%s'\n", filename, exportMime) - return nil + f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } + + if args.PrintMimes { + return self.printMimes(args.Out, f.MimeType) + } + + exportMime, err := getExportMime(args.Mime, f.MimeType) + if err != nil { + return err + } + + filename := getExportFilename(f.Name, exportMime) + + res, err := self.service.Files.Export(args.Id, exportMime).Download() + if err != nil { + return fmt.Errorf("Failed to download file: %s", err) + } + + // Close body on function exit + defer res.Body.Close() + + // Check if file exists + if !args.Force && fileExists(filename) { + return fmt.Errorf("File '%s' already exists, use --force to overwrite", filename) + } + + // Create new file + outFile, err := os.Create(filename) + if err != nil { + return fmt.Errorf("Unable to create new file '%s': %s", filename, err) + } + + // Close file on function exit + defer outFile.Close() + + // Save file to disk + _, err = io.Copy(outFile, res.Body) + if err != nil { + return fmt.Errorf("Failed saving file: %s", err) + } + + fmt.Fprintf(args.Out, "Exported '%s' with mime type: '%s'\n", filename, 
exportMime) + return nil } func (self *Drive) printMimes(out io.Writer, mimeType string) error { - about, err := self.service.About.Get().Fields("exportFormats").Do() - if err != nil { - return fmt.Errorf("Failed to get about: %s", err) - } - - mimes, ok := about.ExportFormats[mimeType] - if !ok { - return fmt.Errorf("File with type '%s' cannot be exported", mimeType) - } - - fmt.Fprintf(out, "Available mime types: %s\n", formatList(mimes)) - return nil + about, err := self.service.About.Get().Fields("exportFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + + mimes, ok := about.ExportFormats[mimeType] + if !ok { + return fmt.Errorf("File with type '%s' cannot be exported", mimeType) + } + + fmt.Fprintf(out, "Available mime types: %s\n", formatList(mimes)) + return nil } func getExportMime(userMime, fileMime string) (string, error) { - if userMime != "" { - return userMime, nil - } + if userMime != "" { + return userMime, nil + } - defaultMime, ok := DefaultExportMime[fileMime] - if !ok { - return "", fmt.Errorf("File with type '%s' does not have a default export mime, and can probably not be exported", fileMime) - } + defaultMime, ok := DefaultExportMime[fileMime] + if !ok { + return "", fmt.Errorf("File with type '%s' does not have a default export mime, and can probably not be exported", fileMime) + } - return defaultMime, nil + return defaultMime, nil } func getExportFilename(name, mimeType string) string { - extensions, err := mime.ExtensionsByType(mimeType) - if err != nil || len(extensions) == 0 { - return name - } + extensions, err := mime.ExtensionsByType(mimeType) + if err != nil || len(extensions) == 0 { + return name + } - return name + extensions[0] + return name + extensions[0] } diff --git a/drive/import.go b/drive/import.go index a3d8b3b..2ee5f1e 100644 --- a/drive/import.go +++ b/drive/import.go @@ -1,53 +1,53 @@ package drive import ( - "io" - "io/ioutil" - "fmt" - "strings" - "mime" - "path/filepath" + "fmt" + "io" + "io/ioutil" + "mime" + "path/filepath" + "strings" ) type ImportArgs struct { - Out io.Writer - Progress io.Writer - Path string - Parents []string + Out io.Writer + Progress io.Writer + Path string + Parents []string } func (self *Drive) Import(args ImportArgs) error { - fromMime := getMimeType(args.Path) - if fromMime == "" { - return fmt.Errorf("Could not determine mime type of file") - } - - about, err := self.service.About.Get().Fields("importFormats").Do() - if err != nil { - return fmt.Errorf("Failed to get about: %s", err) - } - - toMimes, ok := about.ImportFormats[fromMime] - if !ok || len(toMimes) == 0 { - return fmt.Errorf("Mime type '%s' is not supported for import", fromMime) - } - - f, _, err := self.uploadFile(UploadArgs{ - Out: ioutil.Discard, - Progress: args.Progress, - Path: args.Path, - Parents: args.Parents, - Mime: toMimes[0], - }) - if err != nil { - return err - } - - fmt.Fprintf(args.Out, "Imported %s with mime type: '%s'\n", f.Id, toMimes[0]) - return nil + fromMime := getMimeType(args.Path) + if fromMime == "" { + return fmt.Errorf("Could not determine mime type of file") + } + + about, err := self.service.About.Get().Fields("importFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + + toMimes, ok := about.ImportFormats[fromMime] + if !ok || len(toMimes) == 0 { + return fmt.Errorf("Mime type '%s' is not supported for import", fromMime) + } + + f, _, err := self.uploadFile(UploadArgs{ + Out: ioutil.Discard, + Progress: args.Progress, + Path: args.Path, + 
Parents: args.Parents, + Mime: toMimes[0], + }) + if err != nil { + return err + } + + fmt.Fprintf(args.Out, "Imported %s with mime type: '%s'\n", f.Id, toMimes[0]) + return nil } func getMimeType(path string) string { - t := mime.TypeByExtension(filepath.Ext(path)) - return strings.Split(t, ";")[0] + t := mime.TypeByExtension(filepath.Ext(path)) + return strings.Split(t, ";")[0] } diff --git a/drive/info.go b/drive/info.go index aa190a8..c6f4471 100644 --- a/drive/info.go +++ b/drive/info.go @@ -1,68 +1,68 @@ package drive import ( - "io" - "fmt" - "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "io" ) type FileInfoArgs struct { - Out io.Writer - Id string - SizeInBytes bool + Out io.Writer + Id string + SizeInBytes bool } func (self *Drive) Info(args FileInfoArgs) error { - f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description", "webContentLink", "webViewLink").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } + f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description", "webContentLink", "webViewLink").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } - pathfinder := self.newPathfinder() - absPath, err := pathfinder.absPath(f) - if err != nil { - return err - } + pathfinder := self.newPathfinder() + absPath, err := pathfinder.absPath(f) + if err != nil { + return err + } - PrintFileInfo(PrintFileInfoArgs{ - Out: args.Out, - File: f, - Path: absPath, - SizeInBytes: args.SizeInBytes, - }) + PrintFileInfo(PrintFileInfoArgs{ + Out: args.Out, + File: f, + Path: absPath, + SizeInBytes: args.SizeInBytes, + }) - return nil + return nil } type PrintFileInfoArgs struct { - Out io.Writer - File *drive.File - Path string - SizeInBytes bool + Out io.Writer + File *drive.File + Path string + SizeInBytes bool } func PrintFileInfo(args PrintFileInfoArgs) { - f := args.File + f := args.File - items := []kv{ - kv{"Id", f.Id}, - kv{"Name", f.Name}, - kv{"Path", args.Path}, - kv{"Description", f.Description}, - kv{"Mime", f.MimeType}, - kv{"Size", formatSize(f.Size, args.SizeInBytes)}, - kv{"Created", formatDatetime(f.CreatedTime)}, - kv{"Modified", formatDatetime(f.ModifiedTime)}, - kv{"Md5sum", f.Md5Checksum}, - kv{"Shared", formatBool(f.Shared)}, - kv{"Parents", formatList(f.Parents)}, - kv{"ViewUrl", f.WebViewLink}, - kv{"DownloadUrl", f.WebContentLink}, - } + items := []kv{ + kv{"Id", f.Id}, + kv{"Name", f.Name}, + kv{"Path", args.Path}, + kv{"Description", f.Description}, + kv{"Mime", f.MimeType}, + kv{"Size", formatSize(f.Size, args.SizeInBytes)}, + kv{"Created", formatDatetime(f.CreatedTime)}, + kv{"Modified", formatDatetime(f.ModifiedTime)}, + kv{"Md5sum", f.Md5Checksum}, + kv{"Shared", formatBool(f.Shared)}, + kv{"Parents", formatList(f.Parents)}, + kv{"ViewUrl", f.WebViewLink}, + kv{"DownloadUrl", f.WebContentLink}, + } - for _, item := range items { - if item.value != "" { - fmt.Fprintf(args.Out, "%s: %s\n", item.key, item.value) - } - } + for _, item := range items { + if item.value != "" { + fmt.Fprintf(args.Out, "%s: %s\n", item.key, item.value) + } + } } diff --git a/drive/list.go b/drive/list.go index 73fdea5..ab8aca5 100644 --- a/drive/list.go +++ b/drive/list.go @@ -1,136 +1,136 @@ package drive import ( - "fmt" - "io" - "text/tabwriter" - "golang.org/x/net/context" - 
"google.golang.org/api/drive/v3" - "google.golang.org/api/googleapi" + "fmt" + "golang.org/x/net/context" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "text/tabwriter" ) type ListFilesArgs struct { - Out io.Writer - MaxFiles int64 - NameWidth int64 - Query string - SortOrder string - SkipHeader bool - SizeInBytes bool - AbsPath bool + Out io.Writer + MaxFiles int64 + NameWidth int64 + Query string + SortOrder string + SkipHeader bool + SizeInBytes bool + AbsPath bool } func (self *Drive) List(args ListFilesArgs) (err error) { - listArgs := listAllFilesArgs{ - query: args.Query, - fields: []googleapi.Field{"nextPageToken", "files(id,name,md5Checksum,mimeType,size,createdTime,parents)"}, - sortOrder: args.SortOrder, - maxFiles: args.MaxFiles, - } - files, err := self.listAllFiles(listArgs) - if err != nil { - return fmt.Errorf("Failed to list files: %s", err) - } - - pathfinder := self.newPathfinder() - - if args.AbsPath { - // Replace name with absolute path - for _, f := range files { - f.Name, err = pathfinder.absPath(f) - if err != nil { - return err - } - } - } - - PrintFileList(PrintFileListArgs{ - Out: args.Out, - Files: files, - NameWidth: int(args.NameWidth), - SkipHeader: args.SkipHeader, - SizeInBytes: args.SizeInBytes, - }) - - return + listArgs := listAllFilesArgs{ + query: args.Query, + fields: []googleapi.Field{"nextPageToken", "files(id,name,md5Checksum,mimeType,size,createdTime,parents)"}, + sortOrder: args.SortOrder, + maxFiles: args.MaxFiles, + } + files, err := self.listAllFiles(listArgs) + if err != nil { + return fmt.Errorf("Failed to list files: %s", err) + } + + pathfinder := self.newPathfinder() + + if args.AbsPath { + // Replace name with absolute path + for _, f := range files { + f.Name, err = pathfinder.absPath(f) + if err != nil { + return err + } + } + } + + PrintFileList(PrintFileListArgs{ + Out: args.Out, + Files: files, + NameWidth: int(args.NameWidth), + SkipHeader: args.SkipHeader, + SizeInBytes: args.SizeInBytes, + }) + + return } type listAllFilesArgs struct { - query string - fields []googleapi.Field - sortOrder string - maxFiles int64 + query string + fields []googleapi.Field + sortOrder string + maxFiles int64 } func (self *Drive) listAllFiles(args listAllFilesArgs) ([]*drive.File, error) { - var files []*drive.File + var files []*drive.File - var pageSize int64 - if args.maxFiles > 0 && args.maxFiles < 1000 { - pageSize = args.maxFiles - } else { - pageSize = 1000 - } + var pageSize int64 + if args.maxFiles > 0 && args.maxFiles < 1000 { + pageSize = args.maxFiles + } else { + pageSize = 1000 + } - controlledStop := fmt.Errorf("Controlled stop") + controlledStop := fmt.Errorf("Controlled stop") - err := self.service.Files.List().Q(args.query).Fields(args.fields...).OrderBy(args.sortOrder).PageSize(pageSize).Pages(context.TODO(), func(fl *drive.FileList) error { - files = append(files, fl.Files...) + err := self.service.Files.List().Q(args.query).Fields(args.fields...).OrderBy(args.sortOrder).PageSize(pageSize).Pages(context.TODO(), func(fl *drive.FileList) error { + files = append(files, fl.Files...) 
- // Stop when we have all the files we need - if args.maxFiles > 0 && len(files) >= int(args.maxFiles) { - return controlledStop - } + // Stop when we have all the files we need + if args.maxFiles > 0 && len(files) >= int(args.maxFiles) { + return controlledStop + } - return nil - }) + return nil + }) - if err != nil && err != controlledStop { - return nil, err - } + if err != nil && err != controlledStop { + return nil, err + } - if args.maxFiles > 0 { - n := min(len(files), int(args.maxFiles)) - return files[:n], nil - } + if args.maxFiles > 0 { + n := min(len(files), int(args.maxFiles)) + return files[:n], nil + } - return files, nil + return files, nil } type PrintFileListArgs struct { - Out io.Writer - Files []*drive.File - NameWidth int - SkipHeader bool - SizeInBytes bool + Out io.Writer + Files []*drive.File + NameWidth int + SkipHeader bool + SizeInBytes bool } func PrintFileList(args PrintFileListArgs) { - w := new(tabwriter.Writer) - w.Init(args.Out, 0, 0, 3, ' ', 0) - - if !args.SkipHeader { - fmt.Fprintln(w, "Id\tName\tType\tSize\tCreated") - } - - for _, f := range args.Files { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", - f.Id, - truncateString(f.Name, args.NameWidth), - filetype(f), - formatSize(f.Size, args.SizeInBytes), - formatDatetime(f.CreatedTime), - ) - } - - w.Flush() + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tType\tSize\tCreated") + } + + for _, f := range args.Files { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + f.Id, + truncateString(f.Name, args.NameWidth), + filetype(f), + formatSize(f.Size, args.SizeInBytes), + formatDatetime(f.CreatedTime), + ) + } + + w.Flush() } func filetype(f *drive.File) string { - if isDir(f) { - return "dir" - } else if isBinary(f) { - return "bin" - } - return "doc" + if isDir(f) { + return "dir" + } else if isBinary(f) { + return "bin" + } + return "doc" } diff --git a/drive/mkdir.go b/drive/mkdir.go index f6f0641..8eea210 100644 --- a/drive/mkdir.go +++ b/drive/mkdir.go @@ -1,39 +1,39 @@ package drive import ( - "google.golang.org/api/drive/v3" - "io" - "fmt" + "fmt" + "google.golang.org/api/drive/v3" + "io" ) const DirectoryMimeType = "application/vnd.google-apps.folder" type MkdirArgs struct { - Out io.Writer - Name string - Parents []string + Out io.Writer + Name string + Parents []string } func (self *Drive) Mkdir(args MkdirArgs) error { - f, err := self.mkdir(args) - if err != nil { - return err - } - fmt.Fprintf(args.Out, "Directory %s created\n", f.Id) - return nil + f, err := self.mkdir(args) + if err != nil { + return err + } + fmt.Fprintf(args.Out, "Directory %s created\n", f.Id) + return nil } func (self *Drive) mkdir(args MkdirArgs) (*drive.File, error) { - dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType} + dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType} - // Set parent folders - dstFile.Parents = args.Parents + // Set parent folders + dstFile.Parents = args.Parents - // Create directory - f, err := self.service.Files.Create(dstFile).Do() - if err != nil { - return nil, fmt.Errorf("Failed to create directory: %s", err) - } + // Create directory + f, err := self.service.Files.Create(dstFile).Do() + if err != nil { + return nil, fmt.Errorf("Failed to create directory: %s", err) + } - return f, nil + return f, nil } diff --git a/drive/path.go b/drive/path.go index f5d1ad5..8043a01 100644 --- a/drive/path.go +++ b/drive/path.go @@ -1,65 +1,65 @@ package drive import ( - "fmt" - "path/filepath" - 
"google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "path/filepath" ) func (self *Drive) newPathfinder() *remotePathfinder { - return &remotePathfinder{ - service: self.service.Files, - files: make(map[string]*drive.File), - } + return &remotePathfinder{ + service: self.service.Files, + files: make(map[string]*drive.File), + } } type remotePathfinder struct { - service *drive.FilesService - files map[string]*drive.File + service *drive.FilesService + files map[string]*drive.File } func (self *remotePathfinder) absPath(f *drive.File) (string, error) { - name := f.Name + name := f.Name - if len(f.Parents) == 0 { - return name, nil - } + if len(f.Parents) == 0 { + return name, nil + } - var path []string + var path []string - for { - parent, err := self.getParent(f.Parents[0]) - if err != nil { - return "", err - } + for { + parent, err := self.getParent(f.Parents[0]) + if err != nil { + return "", err + } - // Stop when we find the root dir - if len(parent.Parents) == 0 { - break - } + // Stop when we find the root dir + if len(parent.Parents) == 0 { + break + } - path = append([]string{parent.Name}, path...) - f = parent - } + path = append([]string{parent.Name}, path...) + f = parent + } - path = append(path, name) - return filepath.Join(path...), nil + path = append(path, name) + return filepath.Join(path...), nil } func (self *remotePathfinder) getParent(id string) (*drive.File, error) { - // Check cache - if f, ok := self.files[id]; ok { - return f, nil - } + // Check cache + if f, ok := self.files[id]; ok { + return f, nil + } - // Fetch file from drive - f, err := self.service.Get(id).Fields("id", "name", "parents").Do() - if err != nil { - return nil, fmt.Errorf("Failed to get file: %s", err) - } + // Fetch file from drive + f, err := self.service.Get(id).Fields("id", "name", "parents").Do() + if err != nil { + return nil, fmt.Errorf("Failed to get file: %s", err) + } - // Save in cache - self.files[f.Id] = f + // Save in cache + self.files[f.Id] = f - return f, nil + return f, nil } diff --git a/drive/progress.go b/drive/progress.go index 989191e..bb5740c 100644 --- a/drive/progress.go +++ b/drive/progress.go @@ -1,101 +1,101 @@ package drive import ( - "io" - "io/ioutil" - "fmt" - "time" + "fmt" + "io" + "io/ioutil" + "time" ) const MaxDrawInterval = time.Second * 1 const MaxRateInterval = time.Second * 3 func getProgressReader(r io.Reader, w io.Writer, size int64) io.Reader { - // Don't wrap reader if output is discarded or size is too small - if w == ioutil.Discard || (size > 0 && size < 1024 * 1024) { - return r - } - - return &Progress{ - Reader: r, - Writer: w, - Size: size, - } + // Don't wrap reader if output is discarded or size is too small + if w == ioutil.Discard || (size > 0 && size < 1024*1024) { + return r + } + + return &Progress{ + Reader: r, + Writer: w, + Size: size, + } } type Progress struct { - Writer io.Writer - Reader io.Reader - Size int64 - progress int64 - rate int64 - rateProgress int64 - rateUpdated time.Time - updated time.Time - done bool + Writer io.Writer + Reader io.Reader + Size int64 + progress int64 + rate int64 + rateProgress int64 + rateUpdated time.Time + updated time.Time + done bool } func (self *Progress) Read(p []byte) (int, error) { - // Read - n, err := self.Reader.Read(p) - - now := time.Now() - isLast := err != nil - - // Increment progress - newProgress := self.progress + int64(n) - self.progress = newProgress - - // Initialize rate state - if self.rateUpdated.IsZero() { - self.rateUpdated = now - 
self.rateProgress = newProgress - } - - // Update rate every x seconds - if self.rateUpdated.Add(MaxRateInterval).Before(now) { - self.rate = calcRate(newProgress - self.rateProgress, self.rateUpdated, now) - self.rateUpdated = now - self.rateProgress = newProgress - } - - // Draw progress every x seconds - if self.updated.Add(MaxDrawInterval).Before(now) || isLast { - self.draw(isLast) - self.updated = now - } - - // Mark as done if error occurs - self.done = isLast - - return n, err + // Read + n, err := self.Reader.Read(p) + + now := time.Now() + isLast := err != nil + + // Increment progress + newProgress := self.progress + int64(n) + self.progress = newProgress + + // Initialize rate state + if self.rateUpdated.IsZero() { + self.rateUpdated = now + self.rateProgress = newProgress + } + + // Update rate every x seconds + if self.rateUpdated.Add(MaxRateInterval).Before(now) { + self.rate = calcRate(newProgress-self.rateProgress, self.rateUpdated, now) + self.rateUpdated = now + self.rateProgress = newProgress + } + + // Draw progress every x seconds + if self.updated.Add(MaxDrawInterval).Before(now) || isLast { + self.draw(isLast) + self.updated = now + } + + // Mark as done if error occurs + self.done = isLast + + return n, err } func (self *Progress) draw(isLast bool) { - if self.done { - return - } + if self.done { + return + } - self.clear() + self.clear() - // Print progress - fmt.Fprintf(self.Writer, "%s", formatSize(self.progress, false)) + // Print progress + fmt.Fprintf(self.Writer, "%s", formatSize(self.progress, false)) - // Print total size - if self.Size > 0 { - fmt.Fprintf(self.Writer, "/%s", formatSize(self.Size, false)) - } + // Print total size + if self.Size > 0 { + fmt.Fprintf(self.Writer, "/%s", formatSize(self.Size, false)) + } - // Print rate - if self.rate > 0 { - fmt.Fprintf(self.Writer, ", Rate: %s/s", formatSize(self.rate, false)) - } + // Print rate + if self.rate > 0 { + fmt.Fprintf(self.Writer, ", Rate: %s/s", formatSize(self.rate, false)) + } - if isLast { - self.clear() - } + if isLast { + self.clear() + } } func (self *Progress) clear() { - fmt.Fprintf(self.Writer, "\r%50s\r", "") + fmt.Fprintf(self.Writer, "\r%50s\r", "") } diff --git a/drive/revision_delete.go b/drive/revision_delete.go index 88c81c6..de53041 100644 --- a/drive/revision_delete.go +++ b/drive/revision_delete.go @@ -1,31 +1,31 @@ package drive import ( - "io" - "fmt" + "fmt" + "io" ) type DeleteRevisionArgs struct { - Out io.Writer - FileId string - RevisionId string + Out io.Writer + FileId string + RevisionId string } func (self *Drive) DeleteRevision(args DeleteRevisionArgs) (err error) { - rev, err := self.service.Revisions.Get(args.FileId, args.RevisionId).Fields("originalFilename").Do() - if err != nil { - return fmt.Errorf("Failed to get revision: %s", err) - } + rev, err := self.service.Revisions.Get(args.FileId, args.RevisionId).Fields("originalFilename").Do() + if err != nil { + return fmt.Errorf("Failed to get revision: %s", err) + } - if rev.OriginalFilename == "" { - return fmt.Errorf("Deleting revisions for this file type is not supported") - } + if rev.OriginalFilename == "" { + return fmt.Errorf("Deleting revisions for this file type is not supported") + } - err = self.service.Revisions.Delete(args.FileId, args.RevisionId).Do() - if err != nil { - return fmt.Errorf("Failed to delete revision", err) - } + err = self.service.Revisions.Delete(args.FileId, args.RevisionId).Do() + if err != nil { + return fmt.Errorf("Failed to delete revision", err) + } - fmt.Fprintf(args.Out, 
"Deleted revision '%s'\n", args.RevisionId) - return + fmt.Fprintf(args.Out, "Deleted revision '%s'\n", args.RevisionId) + return } diff --git a/drive/revision_download.go b/drive/revision_download.go index 039cd19..04055fa 100644 --- a/drive/revision_download.go +++ b/drive/revision_download.go @@ -1,70 +1,70 @@ package drive import ( - "fmt" - "path/filepath" - "io" - "io/ioutil" + "fmt" + "io" + "io/ioutil" + "path/filepath" ) type DownloadRevisionArgs struct { - Out io.Writer - Progress io.Writer - FileId string - RevisionId string - Path string - Force bool - Stdout bool + Out io.Writer + Progress io.Writer + FileId string + RevisionId string + Path string + Force bool + Stdout bool } func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { - getRev := self.service.Revisions.Get(args.FileId, args.RevisionId) + getRev := self.service.Revisions.Get(args.FileId, args.RevisionId) - rev, err := getRev.Fields("originalFilename").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } + rev, err := getRev.Fields("originalFilename").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } - if rev.OriginalFilename == "" { - return fmt.Errorf("Download is not supported for this file type") - } + if rev.OriginalFilename == "" { + return fmt.Errorf("Download is not supported for this file type") + } - // Get timeout reader wrapper and context - timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + // Get timeout reader wrapper and context + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() - res, err := getRev.Context(ctx).Download() - if err != nil { - return fmt.Errorf("Failed to download file: %s", err) - } + res, err := getRev.Context(ctx).Download() + if err != nil { + return fmt.Errorf("Failed to download file: %s", err) + } - // Close body on function exit - defer res.Body.Close() + // Close body on function exit + defer res.Body.Close() - // Discard other output if file is written to stdout - out := args.Out - if args.Stdout { - out = ioutil.Discard - } + // Discard other output if file is written to stdout + out := args.Out + if args.Stdout { + out = ioutil.Discard + } - // Path to file - fpath := filepath.Join(args.Path, rev.OriginalFilename) + // Path to file + fpath := filepath.Join(args.Path, rev.OriginalFilename) - fmt.Fprintf(out, "Downloading %s -> %s\n", rev.OriginalFilename, fpath) + fmt.Fprintf(out, "Downloading %s -> %s\n", rev.OriginalFilename, fpath) - bytes, rate, err := self.saveFile(saveFileArgs{ - out: args.Out, - body: timeoutReaderWrapper(res.Body), - contentLength: res.ContentLength, - fpath: fpath, - force: args.Force, - stdout: args.Stdout, - progress: args.Progress, - }) + bytes, rate, err := self.saveFile(saveFileArgs{ + out: args.Out, + body: timeoutReaderWrapper(res.Body), + contentLength: res.ContentLength, + fpath: fpath, + force: args.Force, + stdout: args.Stdout, + progress: args.Progress, + }) - if err != nil { - return err - } + if err != nil { + return err + } - fmt.Fprintf(out, "Download complete, rate: %s/s, total size: %s\n", formatSize(rate, false), formatSize(bytes, false)) - return nil + fmt.Fprintf(out, "Download complete, rate: %s/s, total size: %s\n", formatSize(rate, false), formatSize(bytes, false)) + return nil } diff --git a/drive/revision_list.go b/drive/revision_list.go index 941fbca..eec4dab 100644 --- a/drive/revision_list.go +++ b/drive/revision_list.go @@ -1,62 +1,62 @@ package drive import ( - "fmt" - "io" - "text/tabwriter" - 
"google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "io" + "text/tabwriter" ) type ListRevisionsArgs struct { - Out io.Writer - Id string - NameWidth int64 - SkipHeader bool - SizeInBytes bool + Out io.Writer + Id string + NameWidth int64 + SkipHeader bool + SizeInBytes bool } func (self *Drive) ListRevisions(args ListRevisionsArgs) (err error) { - revList, err := self.service.Revisions.List(args.Id).Fields("revisions(id,keepForever,size,modifiedTime,originalFilename)").Do() - if err != nil { - return fmt.Errorf("Failed listing revisions: %s", err) - } - - PrintRevisionList(PrintRevisionListArgs{ - Out: args.Out, - Revisions: revList.Revisions, - NameWidth: int(args.NameWidth), - SkipHeader: args.SkipHeader, - SizeInBytes: args.SizeInBytes, - }) - - return + revList, err := self.service.Revisions.List(args.Id).Fields("revisions(id,keepForever,size,modifiedTime,originalFilename)").Do() + if err != nil { + return fmt.Errorf("Failed listing revisions: %s", err) + } + + PrintRevisionList(PrintRevisionListArgs{ + Out: args.Out, + Revisions: revList.Revisions, + NameWidth: int(args.NameWidth), + SkipHeader: args.SkipHeader, + SizeInBytes: args.SizeInBytes, + }) + + return } type PrintRevisionListArgs struct { - Out io.Writer - Revisions []*drive.Revision - NameWidth int - SkipHeader bool - SizeInBytes bool + Out io.Writer + Revisions []*drive.Revision + NameWidth int + SkipHeader bool + SizeInBytes bool } func PrintRevisionList(args PrintRevisionListArgs) { - w := new(tabwriter.Writer) - w.Init(args.Out, 0, 0, 3, ' ', 0) - - if !args.SkipHeader { - fmt.Fprintln(w, "Id\tName\tSize\tModified\tKeepForever") - } - - for _, rev := range args.Revisions { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", - rev.Id, - truncateString(rev.OriginalFilename, args.NameWidth), - formatSize(rev.Size, args.SizeInBytes), - formatDatetime(rev.ModifiedTime), - formatBool(rev.KeepForever), - ) - } - - w.Flush() + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tSize\tModified\tKeepForever") + } + + for _, rev := range args.Revisions { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + rev.Id, + truncateString(rev.OriginalFilename, args.NameWidth), + formatSize(rev.Size, args.SizeInBytes), + formatDatetime(rev.ModifiedTime), + formatBool(rev.KeepForever), + ) + } + + w.Flush() } diff --git a/drive/share.go b/drive/share.go index 291512a..69b9c7d 100644 --- a/drive/share.go +++ b/drive/share.go @@ -1,109 +1,109 @@ package drive import ( - "io" - "fmt" - "text/tabwriter" - "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "io" + "text/tabwriter" ) type ShareArgs struct { - Out io.Writer - FileId string - Role string - Type string - Email string - Discoverable bool + Out io.Writer + FileId string + Role string + Type string + Email string + Discoverable bool } func (self *Drive) Share(args ShareArgs) error { - permission := &drive.Permission{ - AllowFileDiscovery: args.Discoverable, - Role: args.Role, - Type: args.Type, - EmailAddress: args.Email, - } - - _, err := self.service.Permissions.Create(args.FileId, permission).Do() - if err != nil { - return fmt.Errorf("Failed to share file: %s", err) - } - - fmt.Fprintf(args.Out, "Granted %s permission to %s\n", args.Role, args.Type) - return nil + permission := &drive.Permission{ + AllowFileDiscovery: args.Discoverable, + Role: args.Role, + Type: args.Type, + EmailAddress: args.Email, + } + + _, err := self.service.Permissions.Create(args.FileId, permission).Do() + if 
err != nil { + return fmt.Errorf("Failed to share file: %s", err) + } + + fmt.Fprintf(args.Out, "Granted %s permission to %s\n", args.Role, args.Type) + return nil } type RevokePermissionArgs struct { - Out io.Writer - FileId string - PermissionId string + Out io.Writer + FileId string + PermissionId string } func (self *Drive) RevokePermission(args RevokePermissionArgs) error { - err := self.service.Permissions.Delete(args.FileId, args.PermissionId).Do() - if err != nil { - fmt.Errorf("Failed to revoke permission: %s", err) - return err - } - - fmt.Fprintf(args.Out, "Permission revoked\n") - return nil + err := self.service.Permissions.Delete(args.FileId, args.PermissionId).Do() + if err != nil { + fmt.Errorf("Failed to revoke permission: %s", err) + return err + } + + fmt.Fprintf(args.Out, "Permission revoked\n") + return nil } type ListPermissionsArgs struct { - Out io.Writer - FileId string + Out io.Writer + FileId string } func (self *Drive) ListPermissions(args ListPermissionsArgs) error { - permList, err := self.service.Permissions.List(args.FileId).Fields("permissions(id,role,type,domain,emailAddress,allowFileDiscovery)").Do() - if err != nil { - fmt.Errorf("Failed to list permissions: %s", err) - return err - } - - printPermissions(printPermissionsArgs{ - out: args.Out, - permissions: permList.Permissions, - }) - return nil + permList, err := self.service.Permissions.List(args.FileId).Fields("permissions(id,role,type,domain,emailAddress,allowFileDiscovery)").Do() + if err != nil { + fmt.Errorf("Failed to list permissions: %s", err) + return err + } + + printPermissions(printPermissionsArgs{ + out: args.Out, + permissions: permList.Permissions, + }) + return nil } func (self *Drive) shareAnyoneReader(fileId string) error { - permission := &drive.Permission{ - Role: "reader", - Type: "anyone", - } + permission := &drive.Permission{ + Role: "reader", + Type: "anyone", + } - _, err := self.service.Permissions.Create(fileId, permission).Do() - if err != nil { - return fmt.Errorf("Failed to share file: %s", err) - } + _, err := self.service.Permissions.Create(fileId, permission).Do() + if err != nil { + return fmt.Errorf("Failed to share file: %s", err) + } - return nil + return nil } type printPermissionsArgs struct { - out io.Writer - permissions []*drive.Permission + out io.Writer + permissions []*drive.Permission } func printPermissions(args printPermissionsArgs) { - w := new(tabwriter.Writer) - w.Init(args.out, 0, 0, 3, ' ', 0) - - fmt.Fprintln(w, "Id\tType\tRole\tEmail\tDomain\tDiscoverable") - - for _, p := range args.permissions { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", - p.Id, - p.Type, - p.Role, - p.EmailAddress, - p.Domain, - formatBool(p.AllowFileDiscovery), - ) - } - - w.Flush() + w := new(tabwriter.Writer) + w.Init(args.out, 0, 0, 3, ' ', 0) + + fmt.Fprintln(w, "Id\tType\tRole\tEmail\tDomain\tDiscoverable") + + for _, p := range args.permissions { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", + p.Id, + p.Type, + p.Role, + p.EmailAddress, + p.Domain, + formatBool(p.AllowFileDiscovery), + ) + } + + w.Flush() } diff --git a/drive/sync.go b/drive/sync.go index 2124f8f..35ab16e 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -1,17 +1,17 @@ package drive import ( - "time" - "fmt" - "os" - "io" - "strings" - "path/filepath" - "text/tabwriter" - "github.com/soniakeys/graph" - "github.com/sabhiram/go-git-ignore" - "google.golang.org/api/drive/v3" - "google.golang.org/api/googleapi" + "fmt" + "github.com/sabhiram/go-git-ignore" + "github.com/soniakeys/graph" + 
"google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "os" + "path/filepath" + "strings" + "text/tabwriter" + "time" ) const DefaultIgnoreFile = ".gdriveignore" @@ -19,595 +19,607 @@ const DefaultIgnoreFile = ".gdriveignore" type ModTime int const ( - LocalLastModified ModTime = iota - RemoteLastModified - EqualModifiedTime + LocalLastModified ModTime = iota + RemoteLastModified + EqualModifiedTime ) type LargestSize int const ( - LocalLargestSize LargestSize = iota - RemoteLargestSize - EqualSize + LocalLargestSize LargestSize = iota + RemoteLargestSize + EqualSize ) type ConflictResolution int const ( - NoResolution ConflictResolution = iota - KeepLocal - KeepRemote - KeepLargest + NoResolution ConflictResolution = iota + KeepLocal + KeepRemote + KeepLargest ) func (self *Drive) prepareSyncFiles(localPath string, root *drive.File, cmp FileComparer) (*syncFiles, error) { - localCh := make(chan struct{files []*LocalFile; err error}) - remoteCh := make(chan struct{files []*RemoteFile; err error}) - - go func() { - files, err := prepareLocalFiles(localPath) - localCh <- struct{files []*LocalFile; err error}{files, err} - }() - - go func() { - files, err := self.prepareRemoteFiles(root, "") - remoteCh <- struct{files []*RemoteFile; err error}{files, err} - }() - - local := <-localCh - if local.err != nil { - return nil, local.err - } - - remote := <-remoteCh - if remote.err != nil { - return nil, remote.err - } - - return &syncFiles{ - root: &RemoteFile{file: root}, - local: local.files, - remote: remote.files, - compare: cmp, - }, nil + localCh := make(chan struct { + files []*LocalFile + err error + }) + remoteCh := make(chan struct { + files []*RemoteFile + err error + }) + + go func() { + files, err := prepareLocalFiles(localPath) + localCh <- struct { + files []*LocalFile + err error + }{files, err} + }() + + go func() { + files, err := self.prepareRemoteFiles(root, "") + remoteCh <- struct { + files []*RemoteFile + err error + }{files, err} + }() + + local := <-localCh + if local.err != nil { + return nil, local.err + } + + remote := <-remoteCh + if remote.err != nil { + return nil, remote.err + } + + return &syncFiles{ + root: &RemoteFile{file: root}, + local: local.files, + remote: remote.files, + compare: cmp, + }, nil } func (self *Drive) isSyncFile(id string) (bool, error) { - f, err := self.service.Files.Get(id).Fields("appProperties").Do() - if err != nil { - return false, fmt.Errorf("Failed to get file: %s", err) - } + f, err := self.service.Files.Get(id).Fields("appProperties").Do() + if err != nil { + return false, fmt.Errorf("Failed to get file: %s", err) + } - _, ok := f.AppProperties["sync"] - return ok, nil + _, ok := f.AppProperties["sync"] + return ok, nil } func prepareLocalFiles(root string) ([]*LocalFile, error) { - var files []*LocalFile - - // Get absolute root path - absRootPath, err := filepath.Abs(root) - if err != nil { - return nil, err - } - - // Prepare ignorer - shouldIgnore, err := prepareIgnorer(filepath.Join(absRootPath, DefaultIgnoreFile)) - if err != nil { - return nil, err - } - - err = filepath.Walk(absRootPath, func(absPath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Skip root directory - if absPath == absRootPath { - return nil - } - - // Skip files that are not a directory or regular file - if !info.IsDir() && !info.Mode().IsRegular() { - return nil - } - - // Get relative path from root - relPath, err := filepath.Rel(absRootPath, absPath) - if err != nil { - return err - } - - // 
Skip file if it is ignored by ignore file - if shouldIgnore(relPath) { - return nil - } - - files = append(files, &LocalFile{ - absPath: absPath, - relPath: relPath, - info: info, - }) - - return nil - }) - - if err != nil { - return nil, fmt.Errorf("Failed to prepare local files: %s", err) - } - - return files, err + var files []*LocalFile + + // Get absolute root path + absRootPath, err := filepath.Abs(root) + if err != nil { + return nil, err + } + + // Prepare ignorer + shouldIgnore, err := prepareIgnorer(filepath.Join(absRootPath, DefaultIgnoreFile)) + if err != nil { + return nil, err + } + + err = filepath.Walk(absRootPath, func(absPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip root directory + if absPath == absRootPath { + return nil + } + + // Skip files that are not a directory or regular file + if !info.IsDir() && !info.Mode().IsRegular() { + return nil + } + + // Get relative path from root + relPath, err := filepath.Rel(absRootPath, absPath) + if err != nil { + return err + } + + // Skip file if it is ignored by ignore file + if shouldIgnore(relPath) { + return nil + } + + files = append(files, &LocalFile{ + absPath: absPath, + relPath: relPath, + info: info, + }) + + return nil + }) + + if err != nil { + return nil, fmt.Errorf("Failed to prepare local files: %s", err) + } + + return files, err } func (self *Drive) prepareRemoteFiles(rootDir *drive.File, sortOrder string) ([]*RemoteFile, error) { - // Find all files which has rootDir as root - listArgs := listAllFilesArgs{ - query: fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id), - fields: []googleapi.Field{"nextPageToken", "files(id,name,parents,md5Checksum,mimeType,size,modifiedTime)"}, - sortOrder: sortOrder, - } - files, err := self.listAllFiles(listArgs) - if err != nil { - return nil, fmt.Errorf("Failed listing files: %s", err) - } - - if err := checkFiles(files); err != nil { - return nil, err - } - - relPaths, err := prepareRemoteRelPaths(rootDir, files) - if err != nil { - return nil, err - } - - var remoteFiles []*RemoteFile - for _, f := range files { - relPath, ok := relPaths[f.Id] - if !ok { - return nil, fmt.Errorf("File %s does not have a valid parent", f.Id) - } - remoteFiles = append(remoteFiles, &RemoteFile{ - relPath: relPath, - file: f, - }) - } - - return remoteFiles, nil + // Find all files which has rootDir as root + listArgs := listAllFilesArgs{ + query: fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id), + fields: []googleapi.Field{"nextPageToken", "files(id,name,parents,md5Checksum,mimeType,size,modifiedTime)"}, + sortOrder: sortOrder, + } + files, err := self.listAllFiles(listArgs) + if err != nil { + return nil, fmt.Errorf("Failed listing files: %s", err) + } + + if err := checkFiles(files); err != nil { + return nil, err + } + + relPaths, err := prepareRemoteRelPaths(rootDir, files) + if err != nil { + return nil, err + } + + var remoteFiles []*RemoteFile + for _, f := range files { + relPath, ok := relPaths[f.Id] + if !ok { + return nil, fmt.Errorf("File %s does not have a valid parent", f.Id) + } + remoteFiles = append(remoteFiles, &RemoteFile{ + relPath: relPath, + file: f, + }) + } + + return remoteFiles, nil } func prepareRemoteRelPaths(root *drive.File, files []*drive.File) (map[string]string, error) { - // The tree only holds integer values so we use - // maps to lookup file by index and index by file id - indexLookup := map[string]graph.NI{} - fileLookup := map[graph.NI]*drive.File{} 
- - // All files includes root dir - allFiles := append([]*drive.File{root}, files...) - - // Prepare lookup maps - for i, f := range allFiles { - indexLookup[f.Id] = graph.NI(i) - fileLookup[graph.NI(i)] = f - } - - // This will hold 'parent index' -> 'file index' relationships - pathEnds := make([]graph.PathEnd, len(allFiles)) - - // Prepare parent -> file relationships - for i, f := range allFiles { - if f == root { - pathEnds[i] = graph.PathEnd{From: -1} - continue - } - - // Lookup index of parent - parentIdx, found := indexLookup[f.Parents[0]] - if !found { - return nil, fmt.Errorf("Could not find parent of %s (%s)", f.Id, f.Name) - } - pathEnds[i] = graph.PathEnd{From: parentIdx} - } - - // Create parent pointer tree and calculate path lengths - tree := &graph.FromList{Paths: pathEnds} - tree.RecalcLeaves() - tree.RecalcLen() - - // This will hold a map of file id => relative path - paths := map[string]string{} - - // Find relative path from root for all files - for _, f := range allFiles { - if f == root { - continue - } - - // Find nodes between root and file - nodes := tree.PathTo(indexLookup[f.Id], nil) - - // This will hold the name of all paths between root and - // file (exluding root and including file itself) - pathNames := []string{} - - // Lookup file for each node and grab name - for _, n := range nodes { - file := fileLookup[n] - if file == root { - continue - } - pathNames = append(pathNames, file.Name) - } - - // Join path names to form relative path and add to map - paths[f.Id] = filepath.Join(pathNames...) - } - - return paths, nil + // The tree only holds integer values so we use + // maps to lookup file by index and index by file id + indexLookup := map[string]graph.NI{} + fileLookup := map[graph.NI]*drive.File{} + + // All files includes root dir + allFiles := append([]*drive.File{root}, files...) + + // Prepare lookup maps + for i, f := range allFiles { + indexLookup[f.Id] = graph.NI(i) + fileLookup[graph.NI(i)] = f + } + + // This will hold 'parent index' -> 'file index' relationships + pathEnds := make([]graph.PathEnd, len(allFiles)) + + // Prepare parent -> file relationships + for i, f := range allFiles { + if f == root { + pathEnds[i] = graph.PathEnd{From: -1} + continue + } + + // Lookup index of parent + parentIdx, found := indexLookup[f.Parents[0]] + if !found { + return nil, fmt.Errorf("Could not find parent of %s (%s)", f.Id, f.Name) + } + pathEnds[i] = graph.PathEnd{From: parentIdx} + } + + // Create parent pointer tree and calculate path lengths + tree := &graph.FromList{Paths: pathEnds} + tree.RecalcLeaves() + tree.RecalcLen() + + // This will hold a map of file id => relative path + paths := map[string]string{} + + // Find relative path from root for all files + for _, f := range allFiles { + if f == root { + continue + } + + // Find nodes between root and file + nodes := tree.PathTo(indexLookup[f.Id], nil) + + // This will hold the name of all paths between root and + // file (exluding root and including file itself) + pathNames := []string{} + + // Lookup file for each node and grab name + for _, n := range nodes { + file := fileLookup[n] + if file == root { + continue + } + pathNames = append(pathNames, file.Name) + } + + // Join path names to form relative path and add to map + paths[f.Id] = filepath.Join(pathNames...) 
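
The prepareRemoteRelPaths hunk here turns Drive's flat listing, where each file only knows its parent id, into root-relative paths by building a parent-pointer tree with github.com/soniakeys/graph. A dependency-free sketch of the same idea, with illustrative names (node, relPaths and the ids are not repo symbols): walk each file's parent chain up to the sync root and join the collected names, excluding the root itself.

package main

import (
	"fmt"
	"path/filepath"
)

// node is a minimal stand-in for a drive.File: a name plus a parent id.
type node struct{ name, parent string } // parent == "" marks the sync root

// relPaths walks each file's parent chain and joins the names,
// which is what the FromList/PathTo machinery computes for the sync code.
func relPaths(files map[string]node) map[string]string {
	paths := map[string]string{}
	for id, f := range files {
		var names []string
		for cur, ok := f, true; ok && cur.parent != ""; cur, ok = files[cur.parent] {
			// Prepend so the path reads root-to-leaf.
			names = append([]string{cur.name}, names...)
		}
		paths[id] = filepath.Join(names...)
	}
	return paths
}

func main() {
	files := map[string]node{
		"idRoot": {name: "sync", parent: ""},
		"idDocs": {name: "docs", parent: "idRoot"},
		"idNote": {name: "notes.txt", parent: "idDocs"},
	}
	fmt.Println(relPaths(files)) // map[idDocs:docs idNote:docs/notes.txt idRoot:]
}

Unlike the real function, this sketch does not report a missing parent as an error; it simply stops walking, so it is only meant to clarify the path reconstruction step.
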
+ } + + return paths, nil } func checkFiles(files []*drive.File) error { - uniq := map[string]string{} + uniq := map[string]string{} - for _, f := range files { - // Ensure all files have exactly one parent - if len(f.Parents) != 1 { - return fmt.Errorf("File %s does not have exacly one parent", f.Id) - } + for _, f := range files { + // Ensure all files have exactly one parent + if len(f.Parents) != 1 { + return fmt.Errorf("File %s does not have exacly one parent", f.Id) + } - // Ensure that there are no duplicate files - uniqKey := f.Name + f.Parents[0] - if dupeId, isDupe := uniq[uniqKey]; isDupe { - return fmt.Errorf("Found name collision between %s and %s", f.Id, dupeId) - } - uniq[uniqKey] = f.Id - } + // Ensure that there are no duplicate files + uniqKey := f.Name + f.Parents[0] + if dupeId, isDupe := uniq[uniqKey]; isDupe { + return fmt.Errorf("Found name collision between %s and %s", f.Id, dupeId) + } + uniq[uniqKey] = f.Id + } - return nil + return nil } type LocalFile struct { - absPath string - relPath string - info os.FileInfo + absPath string + relPath string + info os.FileInfo } type RemoteFile struct { - relPath string - file *drive.File + relPath string + file *drive.File } type changedFile struct { - local *LocalFile - remote *RemoteFile + local *LocalFile + remote *RemoteFile } type syncFiles struct { - root *RemoteFile - local []*LocalFile - remote []*RemoteFile - compare FileComparer + root *RemoteFile + local []*LocalFile + remote []*RemoteFile + compare FileComparer } type FileComparer interface { - Changed(*LocalFile, *RemoteFile) bool + Changed(*LocalFile, *RemoteFile) bool } func (self LocalFile) AbsPath() string { - return self.absPath + return self.absPath } func (self LocalFile) Size() int64 { - return self.info.Size() + return self.info.Size() } func (self LocalFile) Modified() time.Time { - return self.info.ModTime() + return self.info.ModTime() } func (self RemoteFile) Md5() string { - return self.file.Md5Checksum + return self.file.Md5Checksum } func (self RemoteFile) Size() int64 { - return self.file.Size + return self.file.Size } func (self RemoteFile) Modified() time.Time { - t, _ := time.Parse(time.RFC3339, self.file.ModifiedTime) - return t + t, _ := time.Parse(time.RFC3339, self.file.ModifiedTime) + return t } func (self *changedFile) compareModTime() ModTime { - localTime := self.local.Modified() - remoteTime := self.remote.Modified() + localTime := self.local.Modified() + remoteTime := self.remote.Modified() - if localTime.After(remoteTime) { - return LocalLastModified - } + if localTime.After(remoteTime) { + return LocalLastModified + } - if remoteTime.After(localTime) { - return RemoteLastModified - } + if remoteTime.After(localTime) { + return RemoteLastModified + } - return EqualModifiedTime + return EqualModifiedTime } func (self *changedFile) compareSize() LargestSize { - localSize := self.local.Size() - remoteSize := self.remote.Size() + localSize := self.local.Size() + remoteSize := self.remote.Size() - if localSize > remoteSize { - return LocalLargestSize - } + if localSize > remoteSize { + return LocalLargestSize + } - if remoteSize > localSize { - return RemoteLargestSize - } + if remoteSize > localSize { + return RemoteLargestSize + } - return EqualSize + return EqualSize } func (self *syncFiles) filterMissingRemoteDirs() []*LocalFile { - var files []*LocalFile + var files []*LocalFile - for _, lf := range self.local { - if lf.info.IsDir() && !self.existsRemote(lf) { - files = append(files, lf) - } - } + for _, lf := range self.local { + 
if lf.info.IsDir() && !self.existsRemote(lf) { + files = append(files, lf) + } + } - return files + return files } func (self *syncFiles) filterMissingLocalDirs() []*RemoteFile { - var files []*RemoteFile + var files []*RemoteFile - for _, rf := range self.remote { - if isDir(rf.file) && !self.existsLocal(rf) { - files = append(files, rf) - } - } + for _, rf := range self.remote { + if isDir(rf.file) && !self.existsLocal(rf) { + files = append(files, rf) + } + } - return files + return files } func (self *syncFiles) filterMissingRemoteFiles() []*LocalFile { - var files []*LocalFile + var files []*LocalFile - for _, lf := range self.local { - if !lf.info.IsDir() && !self.existsRemote(lf) { - files = append(files, lf) - } - } + for _, lf := range self.local { + if !lf.info.IsDir() && !self.existsRemote(lf) { + files = append(files, lf) + } + } - return files + return files } func (self *syncFiles) filterMissingLocalFiles() []*RemoteFile { - var files []*RemoteFile + var files []*RemoteFile - for _, rf := range self.remote { - if !isDir(rf.file) && !self.existsLocal(rf) { - files = append(files, rf) - } - } + for _, rf := range self.remote { + if !isDir(rf.file) && !self.existsLocal(rf) { + files = append(files, rf) + } + } - return files + return files } func (self *syncFiles) filterChangedLocalFiles() []*changedFile { - var files []*changedFile + var files []*changedFile - for _, lf := range self.local { - // Skip directories - if lf.info.IsDir() { - continue - } + for _, lf := range self.local { + // Skip directories + if lf.info.IsDir() { + continue + } - // Skip files that don't exist on drive - rf, found := self.findRemoteByPath(lf.relPath) - if !found { - continue - } + // Skip files that don't exist on drive + rf, found := self.findRemoteByPath(lf.relPath) + if !found { + continue + } - // Check if file has changed - if self.compare.Changed(lf, rf) { - files = append(files, &changedFile{ - local: lf, - remote: rf, - }) - } - } + // Check if file has changed + if self.compare.Changed(lf, rf) { + files = append(files, &changedFile{ + local: lf, + remote: rf, + }) + } + } - return files + return files } func (self *syncFiles) filterChangedRemoteFiles() []*changedFile { - var files []*changedFile + var files []*changedFile - for _, rf := range self.remote { - // Skip directories - if isDir(rf.file) { - continue - } + for _, rf := range self.remote { + // Skip directories + if isDir(rf.file) { + continue + } - // Skip local files that don't exist - lf, found := self.findLocalByPath(rf.relPath) - if !found { - continue - } + // Skip local files that don't exist + lf, found := self.findLocalByPath(rf.relPath) + if !found { + continue + } - // Check if file has changed - if self.compare.Changed(lf, rf) { - files = append(files, &changedFile{ - local: lf, - remote: rf, - }) - } - } + // Check if file has changed + if self.compare.Changed(lf, rf) { + files = append(files, &changedFile{ + local: lf, + remote: rf, + }) + } + } - return files + return files } func (self *syncFiles) filterExtraneousRemoteFiles() []*RemoteFile { - var files []*RemoteFile + var files []*RemoteFile - for _, rf := range self.remote { - if !self.existsLocal(rf) { - files = append(files, rf) - } - } + for _, rf := range self.remote { + if !self.existsLocal(rf) { + files = append(files, rf) + } + } - return files + return files } func (self *syncFiles) filterExtraneousLocalFiles() []*LocalFile { - var files []*LocalFile + var files []*LocalFile - for _, lf := range self.local { - if !self.existsRemote(lf) { - files = 
append(files, lf) - } - } + for _, lf := range self.local { + if !self.existsRemote(lf) { + files = append(files, lf) + } + } - return files + return files } func (self *syncFiles) existsRemote(lf *LocalFile) bool { - _, found := self.findRemoteByPath(lf.relPath) - return found + _, found := self.findRemoteByPath(lf.relPath) + return found } func (self *syncFiles) existsLocal(rf *RemoteFile) bool { - _, found := self.findLocalByPath(rf.relPath) - return found + _, found := self.findLocalByPath(rf.relPath) + return found } func (self *syncFiles) findRemoteByPath(relPath string) (*RemoteFile, bool) { - if relPath == "." { - return self.root, true - } + if relPath == "." { + return self.root, true + } - for _, rf := range self.remote { - if relPath == rf.relPath { - return rf, true - } - } + for _, rf := range self.remote { + if relPath == rf.relPath { + return rf, true + } + } - return nil, false + return nil, false } func (self *syncFiles) findLocalByPath(relPath string) (*LocalFile, bool) { - for _, lf := range self.local { - if relPath == lf.relPath { - return lf, true - } - } + for _, lf := range self.local { + if relPath == lf.relPath { + return lf, true + } + } - return nil, false + return nil, false } func findLocalConflicts(files []*changedFile) []*changedFile { - var conflicts []*changedFile + var conflicts []*changedFile - for _, cf := range files { - if cf.compareModTime() == LocalLastModified { - conflicts = append(conflicts, cf) - } - } + for _, cf := range files { + if cf.compareModTime() == LocalLastModified { + conflicts = append(conflicts, cf) + } + } - return conflicts + return conflicts } func findRemoteConflicts(files []*changedFile) []*changedFile { - var conflicts []*changedFile + var conflicts []*changedFile - for _, cf := range files { - if cf.compareModTime() == RemoteLastModified { - conflicts = append(conflicts, cf) - } - } + for _, cf := range files { + if cf.compareModTime() == RemoteLastModified { + conflicts = append(conflicts, cf) + } + } - return conflicts + return conflicts } type byLocalPathLength []*LocalFile func (self byLocalPathLength) Len() int { - return len(self) + return len(self) } func (self byLocalPathLength) Swap(i, j int) { - self[i], self[j] = self[j], self[i] + self[i], self[j] = self[j], self[i] } func (self byLocalPathLength) Less(i, j int) bool { - return pathLength(self[i].relPath) < pathLength(self[j].relPath) + return pathLength(self[i].relPath) < pathLength(self[j].relPath) } type byRemotePathLength []*RemoteFile func (self byRemotePathLength) Len() int { - return len(self) + return len(self) } func (self byRemotePathLength) Swap(i, j int) { - self[i], self[j] = self[j], self[i] + self[i], self[j] = self[j], self[i] } func (self byRemotePathLength) Less(i, j int) bool { - return pathLength(self[i].relPath) < pathLength(self[j].relPath) + return pathLength(self[i].relPath) < pathLength(self[j].relPath) } type byRemotePath []*RemoteFile func (self byRemotePath) Len() int { - return len(self) + return len(self) } func (self byRemotePath) Swap(i, j int) { - self[i], self[j] = self[j], self[i] + self[i], self[j] = self[j], self[i] } func (self byRemotePath) Less(i, j int) bool { - return strings.ToLower(self[i].relPath) < strings.ToLower(self[j].relPath) + return strings.ToLower(self[i].relPath) < strings.ToLower(self[j].relPath) } type ignoreFunc func(string) bool func prepareIgnorer(path string) (ignoreFunc, error) { - acceptAll := func(string) bool { - return false - } + acceptAll := func(string) bool { + return false + } - if 
!fileExists(path) { - return acceptAll, nil - } + if !fileExists(path) { + return acceptAll, nil + } - ignorer, err := ignore.CompileIgnoreFile(path) - if err != nil { - return acceptAll, fmt.Errorf("Failed to prepare ignorer: %s", err) - } + ignorer, err := ignore.CompileIgnoreFile(path) + if err != nil { + return acceptAll, fmt.Errorf("Failed to prepare ignorer: %s", err) + } - return ignorer.MatchesPath, nil + return ignorer.MatchesPath, nil } func formatConflicts(conflicts []*changedFile, out io.Writer) { - w := new(tabwriter.Writer) - w.Init(out, 0, 0, 3, ' ', 0) - - fmt.Fprintln(w, "Path\tSize Local\tSize Remote\tModified Local\tModified Remote") - - for _, cf := range conflicts { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", - truncateString(cf.local.relPath, 60), - formatSize(cf.local.Size(), false), - formatSize(cf.remote.Size(), false), - cf.local.Modified().Local().Format("Jan _2 2006 15:04:05.000"), - cf.remote.Modified().Local().Format("Jan _2 2006 15:04:05.000"), - ) - } - - w.Flush() + w := new(tabwriter.Writer) + w.Init(out, 0, 0, 3, ' ', 0) + + fmt.Fprintln(w, "Path\tSize Local\tSize Remote\tModified Local\tModified Remote") + + for _, cf := range conflicts { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + truncateString(cf.local.relPath, 60), + formatSize(cf.local.Size(), false), + formatSize(cf.remote.Size(), false), + cf.local.Modified().Local().Format("Jan _2 2006 15:04:05.000"), + cf.remote.Modified().Local().Format("Jan _2 2006 15:04:05.000"), + ) + } + + w.Flush() } diff --git a/drive/sync_download.go b/drive/sync_download.go index 4d84eea..04b50b9 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -1,325 +1,325 @@ package drive import ( - "fmt" - "io" - "os" - "sort" - "time" - "bytes" - "path/filepath" - "google.golang.org/api/googleapi" - "google.golang.org/api/drive/v3" + "bytes" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "os" + "path/filepath" + "sort" + "time" ) type DownloadSyncArgs struct { - Out io.Writer - Progress io.Writer - RootId string - Path string - DryRun bool - DeleteExtraneous bool - Resolution ConflictResolution - Comparer FileComparer + Out io.Writer + Progress io.Writer + RootId string + Path string + DryRun bool + DeleteExtraneous bool + Resolution ConflictResolution + Comparer FileComparer } func (self *Drive) DownloadSync(args DownloadSyncArgs) error { - fmt.Fprintln(args.Out, "Starting sync...") - started := time.Now() - - // Get remote root dir - rootDir, err := self.getSyncRoot(args.RootId) - if err != nil { - return err - } - - fmt.Fprintln(args.Out, "Collecting file information...") - files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer) - if err != nil { - return err - } - - // Find changed files - changedFiles := files.filterChangedRemoteFiles() - - fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) - - // Ensure that we don't overwrite any local changes - if args.Resolution == NoResolution { - err = ensureNoLocalModifications(changedFiles) - if err != nil { - return fmt.Errorf("Conflict detected!\nThe following files have changed and the local file are newer than it's remote counterpart:\n\n%s\nNo conflict resolution was given, aborting...", err) - } - } - - // Create missing directories - err = self.createMissingLocalDirs(files, args) - if err != nil { - return err - } - - // Download missing files - err = self.downloadMissingFiles(files, args) - if err != nil { - return err - } - - // Download files that has 
changed - err = self.downloadChangedFiles(changedFiles, args) - if err != nil { - return err - } - - // Delete extraneous local files - if args.DeleteExtraneous { - err = self.deleteExtraneousLocalFiles(files, args) - if err != nil { - return err - } - } - fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started)) - - return nil + fmt.Fprintln(args.Out, "Starting sync...") + started := time.Now() + + // Get remote root dir + rootDir, err := self.getSyncRoot(args.RootId) + if err != nil { + return err + } + + fmt.Fprintln(args.Out, "Collecting file information...") + files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer) + if err != nil { + return err + } + + // Find changed files + changedFiles := files.filterChangedRemoteFiles() + + fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) + + // Ensure that we don't overwrite any local changes + if args.Resolution == NoResolution { + err = ensureNoLocalModifications(changedFiles) + if err != nil { + return fmt.Errorf("Conflict detected!\nThe following files have changed and the local file are newer than it's remote counterpart:\n\n%s\nNo conflict resolution was given, aborting...", err) + } + } + + // Create missing directories + err = self.createMissingLocalDirs(files, args) + if err != nil { + return err + } + + // Download missing files + err = self.downloadMissingFiles(files, args) + if err != nil { + return err + } + + // Download files that has changed + err = self.downloadChangedFiles(changedFiles, args) + if err != nil { + return err + } + + // Delete extraneous local files + if args.DeleteExtraneous { + err = self.deleteExtraneousLocalFiles(files, args) + if err != nil { + return err + } + } + fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started)) + + return nil } func (self *Drive) getSyncRoot(rootId string) (*drive.File, error) { - fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"} - f, err := self.service.Files.Get(rootId).Fields(fields...).Do() - if err != nil { - return nil, fmt.Errorf("Failed to find root dir: %s", err) - } - - // Ensure file is a directory - if !isDir(f) { - return nil, fmt.Errorf("Provided root id is not a directory") - } - - // Ensure directory is a proper syncRoot - if _, ok := f.AppProperties["syncRoot"]; !ok { - return nil, fmt.Errorf("Provided id is not a sync root directory") - } - - return f, nil + fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"} + f, err := self.service.Files.Get(rootId).Fields(fields...).Do() + if err != nil { + return nil, fmt.Errorf("Failed to find root dir: %s", err) + } + + // Ensure file is a directory + if !isDir(f) { + return nil, fmt.Errorf("Provided root id is not a directory") + } + + // Ensure directory is a proper syncRoot + if _, ok := f.AppProperties["syncRoot"]; !ok { + return nil, fmt.Errorf("Provided id is not a sync root directory") + } + + return f, nil } func (self *Drive) createMissingLocalDirs(files *syncFiles, args DownloadSyncArgs) error { - missingDirs := files.filterMissingLocalDirs() - missingCount := len(missingDirs) + missingDirs := files.filterMissingLocalDirs() + missingCount := len(missingDirs) - if missingCount > 0 { - fmt.Fprintf(args.Out, "\n%d local directories are missing\n", missingCount) - } + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d local directories are missing\n", missingCount) + } - // Sort directories so that the dirs with the shortest path comes first - sort.Sort(byRemotePathLength(missingDirs)) + 
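
The sort in this hunk is what lets the sync create parent directories before their children: missing directories are ordered by path depth, shallowest first, and the extraneous-file cleanup later reverses the same ordering so children are removed before their parents. A self-contained illustration of that ordering follows; byDepth is illustrative only, while the repo uses byLocalPathLength/byRemotePathLength backed by its pathLength helper, whose exact depth measure may differ.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// byDepth orders paths by the number of separators, mirroring the
// byLocalPathLength/byRemotePathLength types in sync.go.
type byDepth []string

func (s byDepth) Len() int      { return len(s) }
func (s byDepth) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byDepth) Less(i, j int) bool {
	return strings.Count(s[i], "/") < strings.Count(s[j], "/")
}

func main() {
	dirs := []string{"a/b/c", "a", "a/b"}

	// Creation: shallowest first, so "a" exists before "a/b" is created.
	sort.Sort(byDepth(dirs))
	fmt.Println("create order:", dirs) // [a a/b a/b/c]

	// Deletion: deepest first, so "a/b/c" goes before its parents.
	sort.Sort(sort.Reverse(byDepth(dirs)))
	fmt.Println("delete order:", dirs) // [a/b/c a/b a]
}
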
// Sort directories so that the dirs with the shortest path comes first + sort.Sort(byRemotePathLength(missingDirs)) - for i, rf := range missingDirs { - absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) - if err != nil { - return fmt.Errorf("Failed to determine local absolute path: %s", err) - } - fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i + 1, missingCount, filepath.Join(filepath.Base(args.Path), rf.relPath)) + for i, rf := range missingDirs { + absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) + if err != nil { + return fmt.Errorf("Failed to determine local absolute path: %s", err) + } + fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i+1, missingCount, filepath.Join(filepath.Base(args.Path), rf.relPath)) - if args.DryRun { - continue - } + if args.DryRun { + continue + } - os.MkdirAll(absPath, 0775) - } + os.MkdirAll(absPath, 0775) + } - return nil + return nil } func (self *Drive) downloadMissingFiles(files *syncFiles, args DownloadSyncArgs) error { - missingFiles := files.filterMissingLocalFiles() - missingCount := len(missingFiles) - - if missingCount > 0 { - fmt.Fprintf(args.Out, "\n%d local files are missing\n", missingCount) - } - - for i, rf := range missingFiles { - absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) - if err != nil { - return fmt.Errorf("Failed to determine local absolute path: %s", err) - } - fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, missingCount, rf.relPath, filepath.Join(filepath.Base(args.Path), rf.relPath)) - - err = self.downloadRemoteFile(rf.file.Id, absPath, args, 0) - if err != nil { - return err - } - } - - return nil + missingFiles := files.filterMissingLocalFiles() + missingCount := len(missingFiles) + + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d local files are missing\n", missingCount) + } + + for i, rf := range missingFiles { + absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) + if err != nil { + return fmt.Errorf("Failed to determine local absolute path: %s", err) + } + fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i+1, missingCount, rf.relPath, filepath.Join(filepath.Base(args.Path), rf.relPath)) + + err = self.downloadRemoteFile(rf.file.Id, absPath, args, 0) + if err != nil { + return err + } + } + + return nil } func (self *Drive) downloadChangedFiles(changedFiles []*changedFile, args DownloadSyncArgs) error { - changedCount := len(changedFiles) - - if changedCount > 0 { - fmt.Fprintf(args.Out, "\n%d remote files has changed\n", changedCount) - } - - for i, cf := range changedFiles { - if skip, reason := checkLocalConflict(cf, args.Resolution); skip { - fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i + 1, changedCount, cf.remote.relPath, reason) - continue - } - - absPath, err := filepath.Abs(filepath.Join(args.Path, cf.remote.relPath)) - if err != nil { - return fmt.Errorf("Failed to determine local absolute path: %s", err) - } - fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, changedCount, cf.remote.relPath, filepath.Join(filepath.Base(args.Path), cf.remote.relPath)) - - err = self.downloadRemoteFile(cf.remote.file.Id, absPath, args, 0) - if err != nil { - return err - } - } - - return nil + changedCount := len(changedFiles) + + if changedCount > 0 { + fmt.Fprintf(args.Out, "\n%d remote files has changed\n", changedCount) + } + + for i, cf := range changedFiles { + if skip, reason := checkLocalConflict(cf, args.Resolution); skip { + fmt.Fprintf(args.Out, "[%04d/%04d] 
Skipping %s (%s)\n", i+1, changedCount, cf.remote.relPath, reason) + continue + } + + absPath, err := filepath.Abs(filepath.Join(args.Path, cf.remote.relPath)) + if err != nil { + return fmt.Errorf("Failed to determine local absolute path: %s", err) + } + fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i+1, changedCount, cf.remote.relPath, filepath.Join(filepath.Base(args.Path), cf.remote.relPath)) + + err = self.downloadRemoteFile(cf.remote.file.Id, absPath, args, 0) + if err != nil { + return err + } + } + + return nil } func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, try int) error { - if args.DryRun { - return nil - } - - // Get timeout reader wrapper and context - timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() - - res, err := self.service.Files.Get(id).Context(ctx).Download() - if err != nil { - if isBackendError(err) && try < MaxBackendErrorRetries { - exponentialBackoffSleep(try) - try++ - return self.downloadRemoteFile(id, fpath, args, try) - } else { - return fmt.Errorf("Failed to download file: %s", err) - } - } - - // Close body on function exit - defer res.Body.Close() - - // Wrap response body in progress reader - progressReader := getProgressReader(res.Body, args.Progress, res.ContentLength) - - // Wrap reader in timeout reader - reader := timeoutReaderWrapper(progressReader) - - // Ensure any parent directories exists - if err = mkdir(fpath); err != nil { - return err - } - - // Download to tmp file - tmpPath := fpath + ".incomplete" - - // Create new file - outFile, err := os.Create(tmpPath) - if err != nil { - return fmt.Errorf("Unable to create local file: %s", err) - } - - // Save file to disk - _, err = io.Copy(outFile, reader) - if err != nil { - outFile.Close() - if try < MaxBackendErrorRetries { - exponentialBackoffSleep(try) - try++ - return self.downloadRemoteFile(id, fpath, args, try) - } else { - os.Remove(tmpPath) - return fmt.Errorf("Download was interrupted: %s", err) - } - } - - // Close file - outFile.Close() - - // Rename tmp file to proper filename - return os.Rename(tmpPath, fpath) + if args.DryRun { + return nil + } + + // Get timeout reader wrapper and context + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + + res, err := self.service.Files.Get(id).Context(ctx).Download() + if err != nil { + if isBackendError(err) && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + return self.downloadRemoteFile(id, fpath, args, try) + } else { + return fmt.Errorf("Failed to download file: %s", err) + } + } + + // Close body on function exit + defer res.Body.Close() + + // Wrap response body in progress reader + progressReader := getProgressReader(res.Body, args.Progress, res.ContentLength) + + // Wrap reader in timeout reader + reader := timeoutReaderWrapper(progressReader) + + // Ensure any parent directories exists + if err = mkdir(fpath); err != nil { + return err + } + + // Download to tmp file + tmpPath := fpath + ".incomplete" + + // Create new file + outFile, err := os.Create(tmpPath) + if err != nil { + return fmt.Errorf("Unable to create local file: %s", err) + } + + // Save file to disk + _, err = io.Copy(outFile, reader) + if err != nil { + outFile.Close() + if try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + return self.downloadRemoteFile(id, fpath, args, try) + } else { + os.Remove(tmpPath) + return fmt.Errorf("Download was interrupted: %s", err) + } + } + + // Close file + outFile.Close() + + // Rename tmp file to proper filename + 
return os.Rename(tmpPath, fpath) } func (self *Drive) deleteExtraneousLocalFiles(files *syncFiles, args DownloadSyncArgs) error { - extraneousFiles := files.filterExtraneousLocalFiles() - extraneousCount := len(extraneousFiles) + extraneousFiles := files.filterExtraneousLocalFiles() + extraneousCount := len(extraneousFiles) - if extraneousCount > 0 { - fmt.Fprintf(args.Out, "\n%d local files are extraneous\n", extraneousCount) - } + if extraneousCount > 0 { + fmt.Fprintf(args.Out, "\n%d local files are extraneous\n", extraneousCount) + } - // Sort files so that the files with the longest path comes first - sort.Sort(sort.Reverse(byLocalPathLength(extraneousFiles))) + // Sort files so that the files with the longest path comes first + sort.Sort(sort.Reverse(byLocalPathLength(extraneousFiles))) - for i, lf := range extraneousFiles { - fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, lf.absPath) + for i, lf := range extraneousFiles { + fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i+1, extraneousCount, lf.absPath) - if args.DryRun { - continue - } + if args.DryRun { + continue + } - err := os.Remove(lf.absPath) - if err != nil { - return fmt.Errorf("Failed to delete local file: %s", err) - } - } + err := os.Remove(lf.absPath) + if err != nil { + return fmt.Errorf("Failed to delete local file: %s", err) + } + } - return nil + return nil } func checkLocalConflict(cf *changedFile, resolution ConflictResolution) (bool, string) { - // No conflict unless local file was last modified - if cf.compareModTime() != LocalLastModified { - return false, "" - } - - // Don't skip if want to keep the remote file - if resolution == KeepRemote { - return false, "" - } - - // Skip if we want to keep the local file - if resolution == KeepLocal { - return true, "conflicting file, keeping local file" - } - - if resolution == KeepLargest { - largest := cf.compareSize() - - // Skip if the local file is largest - if largest == LocalLargestSize { - return true, "conflicting file, local file is largest, keeping local" - } - - // Don't skip if the remote file is largest - if largest == RemoteLargestSize { - return false, "" - } - - // Keep local if both files have the same size - if largest == EqualSize { - return true, "conflicting file, file sizes are equal, keeping local" - } - } - - // The conditionals above should cover all cases, - // unless the programmer did something wrong, - // in which case we default to being non-destructive and skip the file - return true, "conflicting file, unhandled case" + // No conflict unless local file was last modified + if cf.compareModTime() != LocalLastModified { + return false, "" + } + + // Don't skip if want to keep the remote file + if resolution == KeepRemote { + return false, "" + } + + // Skip if we want to keep the local file + if resolution == KeepLocal { + return true, "conflicting file, keeping local file" + } + + if resolution == KeepLargest { + largest := cf.compareSize() + + // Skip if the local file is largest + if largest == LocalLargestSize { + return true, "conflicting file, local file is largest, keeping local" + } + + // Don't skip if the remote file is largest + if largest == RemoteLargestSize { + return false, "" + } + + // Keep local if both files have the same size + if largest == EqualSize { + return true, "conflicting file, file sizes are equal, keeping local" + } + } + + // The conditionals above should cover all cases, + // unless the programmer did something wrong, + // in which case we default to being non-destructive and 
skip the file + return true, "conflicting file, unhandled case" } func ensureNoLocalModifications(files []*changedFile) error { - conflicts := findLocalConflicts(files) - if len(conflicts) == 0 { - return nil - } - - buffer := bytes.NewBufferString("") - formatConflicts(conflicts, buffer) - return fmt.Errorf(buffer.String()) + conflicts := findLocalConflicts(files) + if len(conflicts) == 0 { + return nil + } + + buffer := bytes.NewBufferString("") + formatConflicts(conflicts, buffer) + return fmt.Errorf(buffer.String()) } diff --git a/drive/sync_list.go b/drive/sync_list.go index e035239..c9b84fb 100644 --- a/drive/sync_list.go +++ b/drive/sync_list.go @@ -1,97 +1,97 @@ package drive import ( - "fmt" - "sort" - "io" - "text/tabwriter" - "google.golang.org/api/googleapi" - "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "sort" + "text/tabwriter" ) type ListSyncArgs struct { - Out io.Writer - SkipHeader bool + Out io.Writer + SkipHeader bool } func (self *Drive) ListSync(args ListSyncArgs) error { - listArgs := listAllFilesArgs{ - query: "appProperties has {key='syncRoot' and value='true'}", - fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,createdTime)"}, - } - files, err := self.listAllFiles(listArgs) - if err != nil { - return err - } - printSyncDirectories(files, args) - return nil + listArgs := listAllFilesArgs{ + query: "appProperties has {key='syncRoot' and value='true'}", + fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,createdTime)"}, + } + files, err := self.listAllFiles(listArgs) + if err != nil { + return err + } + printSyncDirectories(files, args) + return nil } type ListRecursiveSyncArgs struct { - Out io.Writer - RootId string - SkipHeader bool - PathWidth int64 - SizeInBytes bool - SortOrder string + Out io.Writer + RootId string + SkipHeader bool + PathWidth int64 + SizeInBytes bool + SortOrder string } func (self *Drive) ListRecursiveSync(args ListRecursiveSyncArgs) error { - rootDir, err := self.getSyncRoot(args.RootId) - if err != nil { - return err - } - - files, err := self.prepareRemoteFiles(rootDir, args.SortOrder) - if err != nil { - return err - } - - printSyncDirContent(files, args) - return nil + rootDir, err := self.getSyncRoot(args.RootId) + if err != nil { + return err + } + + files, err := self.prepareRemoteFiles(rootDir, args.SortOrder) + if err != nil { + return err + } + + printSyncDirContent(files, args) + return nil } func printSyncDirectories(files []*drive.File, args ListSyncArgs) { - w := new(tabwriter.Writer) - w.Init(args.Out, 0, 0, 3, ' ', 0) - - if !args.SkipHeader { - fmt.Fprintln(w, "Id\tName\tCreated") - } - - for _, f := range files { - fmt.Fprintf(w, "%s\t%s\t%s\n", - f.Id, - f.Name, - formatDatetime(f.CreatedTime), - ) - } - - w.Flush() + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tCreated") + } + + for _, f := range files { + fmt.Fprintf(w, "%s\t%s\t%s\n", + f.Id, + f.Name, + formatDatetime(f.CreatedTime), + ) + } + + w.Flush() } func printSyncDirContent(files []*RemoteFile, args ListRecursiveSyncArgs) { - if args.SortOrder == "" { - // Sort files by path - sort.Sort(byRemotePath(files)) - } - - w := new(tabwriter.Writer) - w.Init(args.Out, 0, 0, 3, ' ', 0) - - if !args.SkipHeader { - fmt.Fprintln(w, "Id\tPath\tType\tSize\tModified") - } - - for _, rf := range files { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", - rf.file.Id, - truncateString(rf.relPath, 
int(args.PathWidth)), - filetype(rf.file), - formatSize(rf.file.Size, args.SizeInBytes), - formatDatetime(rf.file.ModifiedTime), - ) - } - - w.Flush() + if args.SortOrder == "" { + // Sort files by path + sort.Sort(byRemotePath(files)) + } + + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tPath\tType\tSize\tModified") + } + + for _, rf := range files { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + rf.file.Id, + truncateString(rf.relPath, int(args.PathWidth)), + filetype(rf.file), + formatSize(rf.file.Size, args.SizeInBytes), + formatDatetime(rf.file.ModifiedTime), + ) + } + + w.Flush() } diff --git a/drive/sync_upload.go b/drive/sync_upload.go index 96442e1..0d5c208 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -1,476 +1,475 @@ package drive import ( - "fmt" - "io" - "os" - "time" - "sort" - "bytes" - "path/filepath" - "google.golang.org/api/googleapi" - "google.golang.org/api/drive/v3" + "bytes" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "os" + "path/filepath" + "sort" + "time" ) type UploadSyncArgs struct { - Out io.Writer - Progress io.Writer - Path string - RootId string - DryRun bool - DeleteExtraneous bool - ChunkSize int64 - Resolution ConflictResolution - Comparer FileComparer + Out io.Writer + Progress io.Writer + Path string + RootId string + DryRun bool + DeleteExtraneous bool + ChunkSize int64 + Resolution ConflictResolution + Comparer FileComparer } func (self *Drive) UploadSync(args UploadSyncArgs) error { - if args.ChunkSize > intMax() - 1 { - return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) - } - - fmt.Fprintln(args.Out, "Starting sync...") - started := time.Now() - - // Create root directory if it does not exist - rootDir, err := self.prepareSyncRoot(args) - if err != nil { - return err - } - - fmt.Fprintln(args.Out, "Collecting local and remote file information...") - files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer) - if err != nil { - return err - } - - // Find missing and changed files - changedFiles := files.filterChangedLocalFiles() - missingFiles := files.filterMissingRemoteFiles() - - fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) - - // Ensure that there is enough free space on drive - if ok, msg := self.checkRemoteFreeSpace(missingFiles, changedFiles); !ok { - return fmt.Errorf(msg) - } - - // Ensure that we don't overwrite any remote changes - if args.Resolution == NoResolution { - err = ensureNoRemoteModifications(changedFiles) - if err != nil { - return fmt.Errorf("Conflict detected!\nThe following files have changed and the remote file are newer than it's local counterpart:\n\n%s\nNo conflict resolution was given, aborting...", err) - } - } - - // Create missing directories - files, err = self.createMissingRemoteDirs(files, args) - if err != nil { - return err - } - - // Upload missing files - err = self.uploadMissingFiles(missingFiles, files, args) - if err != nil { - return err - } - - // Update modified files - err = self.updateChangedFiles(changedFiles, rootDir, args) - if err != nil { - return err - } - - // Delete extraneous files on drive - if args.DeleteExtraneous { - err = self.deleteExtraneousRemoteFiles(files, args) - if err != nil { - return err - } - } - fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started)) - - return nil + if args.ChunkSize > intMax()-1 { + return fmt.Errorf("Chunk 
size is to big, max chunk size for this computer is %d", intMax()-1) + } + + fmt.Fprintln(args.Out, "Starting sync...") + started := time.Now() + + // Create root directory if it does not exist + rootDir, err := self.prepareSyncRoot(args) + if err != nil { + return err + } + + fmt.Fprintln(args.Out, "Collecting local and remote file information...") + files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer) + if err != nil { + return err + } + + // Find missing and changed files + changedFiles := files.filterChangedLocalFiles() + missingFiles := files.filterMissingRemoteFiles() + + fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) + + // Ensure that there is enough free space on drive + if ok, msg := self.checkRemoteFreeSpace(missingFiles, changedFiles); !ok { + return fmt.Errorf(msg) + } + + // Ensure that we don't overwrite any remote changes + if args.Resolution == NoResolution { + err = ensureNoRemoteModifications(changedFiles) + if err != nil { + return fmt.Errorf("Conflict detected!\nThe following files have changed and the remote file are newer than it's local counterpart:\n\n%s\nNo conflict resolution was given, aborting...", err) + } + } + + // Create missing directories + files, err = self.createMissingRemoteDirs(files, args) + if err != nil { + return err + } + + // Upload missing files + err = self.uploadMissingFiles(missingFiles, files, args) + if err != nil { + return err + } + + // Update modified files + err = self.updateChangedFiles(changedFiles, rootDir, args) + if err != nil { + return err + } + + // Delete extraneous files on drive + if args.DeleteExtraneous { + err = self.deleteExtraneousRemoteFiles(files, args) + if err != nil { + return err + } + } + fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started)) + + return nil } func (self *Drive) prepareSyncRoot(args UploadSyncArgs) (*drive.File, error) { - fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"} - f, err := self.service.Files.Get(args.RootId).Fields(fields...).Do() - if err != nil { - return nil, fmt.Errorf("Failed to find root dir: %s", err) - } - - // Ensure file is a directory - if !isDir(f) { - return nil, fmt.Errorf("Provided root id is not a directory") - } - - // Return directory if syncRoot property is already set - if _, ok := f.AppProperties["syncRoot"]; ok { - return f, nil - } - - // This is the first time this directory have been used for sync - // Check if the directory is empty - isEmpty, err := self.dirIsEmpty(f.Id) - if err != nil { - return nil, fmt.Errorf("Failed to check if root dir is empty: %s", err) - } - - // Ensure that the directory is empty - if !isEmpty { - return nil, fmt.Errorf("Root directoy is not empty, the initial sync requires an empty directory") - } - - // Update directory with syncRoot property - dstFile := &drive.File{ - AppProperties: map[string]string{"sync": "true", "syncRoot": "true"}, - } - - f, err = self.service.Files.Update(f.Id, dstFile).Fields(fields...).Do() - if err != nil { - return nil, fmt.Errorf("Failed to update root directory: %s", err) - } - - return f, nil + fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"} + f, err := self.service.Files.Get(args.RootId).Fields(fields...).Do() + if err != nil { + return nil, fmt.Errorf("Failed to find root dir: %s", err) + } + + // Ensure file is a directory + if !isDir(f) { + return nil, fmt.Errorf("Provided root id is not a directory") + } + + // Return directory if syncRoot property is already set 
+ if _, ok := f.AppProperties["syncRoot"]; ok { + return f, nil + } + + // This is the first time this directory have been used for sync + // Check if the directory is empty + isEmpty, err := self.dirIsEmpty(f.Id) + if err != nil { + return nil, fmt.Errorf("Failed to check if root dir is empty: %s", err) + } + + // Ensure that the directory is empty + if !isEmpty { + return nil, fmt.Errorf("Root directoy is not empty, the initial sync requires an empty directory") + } + + // Update directory with syncRoot property + dstFile := &drive.File{ + AppProperties: map[string]string{"sync": "true", "syncRoot": "true"}, + } + + f, err = self.service.Files.Update(f.Id, dstFile).Fields(fields...).Do() + if err != nil { + return nil, fmt.Errorf("Failed to update root directory: %s", err) + } + + return f, nil } func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs) (*syncFiles, error) { - missingDirs := files.filterMissingRemoteDirs() - missingCount := len(missingDirs) - - if missingCount > 0 { - fmt.Fprintf(args.Out, "\n%d remote directories are missing\n", missingCount) - } - - // Sort directories so that the dirs with the shortest path comes first - sort.Sort(byLocalPathLength(missingDirs)) - - for i, lf := range missingDirs { - parentPath := parentFilePath(lf.relPath) - parent, ok := files.findRemoteByPath(parentPath) - if !ok { - return nil, fmt.Errorf("Could not find remote directory with path '%s'", parentPath) - } - - fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i + 1, missingCount, filepath.Join(files.root.file.Name, lf.relPath)) - - f, err := self.createMissingRemoteDir(createMissingRemoteDirArgs{ - name: lf.info.Name(), - parentId: parent.file.Id, - rootId: args.RootId, - dryRun: args.DryRun, - try: 0, - }) - if err != nil { - return nil, err - } - - files.remote = append(files.remote, &RemoteFile{ - relPath: lf.relPath, - file: f, - }) - } - - return files, nil + missingDirs := files.filterMissingRemoteDirs() + missingCount := len(missingDirs) + + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d remote directories are missing\n", missingCount) + } + + // Sort directories so that the dirs with the shortest path comes first + sort.Sort(byLocalPathLength(missingDirs)) + + for i, lf := range missingDirs { + parentPath := parentFilePath(lf.relPath) + parent, ok := files.findRemoteByPath(parentPath) + if !ok { + return nil, fmt.Errorf("Could not find remote directory with path '%s'", parentPath) + } + + fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i+1, missingCount, filepath.Join(files.root.file.Name, lf.relPath)) + + f, err := self.createMissingRemoteDir(createMissingRemoteDirArgs{ + name: lf.info.Name(), + parentId: parent.file.Id, + rootId: args.RootId, + dryRun: args.DryRun, + try: 0, + }) + if err != nil { + return nil, err + } + + files.remote = append(files.remote, &RemoteFile{ + relPath: lf.relPath, + file: f, + }) + } + + return files, nil } type createMissingRemoteDirArgs struct { - name string - parentId string - rootId string - dryRun bool - try int + name string + parentId string + rootId string + dryRun bool + try int } func (self *Drive) uploadMissingFiles(missingFiles []*LocalFile, files *syncFiles, args UploadSyncArgs) error { - missingCount := len(missingFiles) + missingCount := len(missingFiles) - if missingCount > 0 { - fmt.Fprintf(args.Out, "\n%d remote files are missing\n", missingCount) - } + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d remote files are missing\n", missingCount) + } - for i, lf := range 
missingFiles { - parentPath := parentFilePath(lf.relPath) - parent, ok := files.findRemoteByPath(parentPath) - if !ok { - return fmt.Errorf("Could not find remote directory with path '%s'", parentPath) - } + for i, lf := range missingFiles { + parentPath := parentFilePath(lf.relPath) + parent, ok := files.findRemoteByPath(parentPath) + if !ok { + return fmt.Errorf("Could not find remote directory with path '%s'", parentPath) + } - fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i + 1, missingCount, lf.relPath, filepath.Join(files.root.file.Name, lf.relPath)) + fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i+1, missingCount, lf.relPath, filepath.Join(files.root.file.Name, lf.relPath)) - err := self.uploadMissingFile(parent.file.Id, lf, args, 0) - if err != nil { - return err - } - } + err := self.uploadMissingFile(parent.file.Id, lf, args, 0) + if err != nil { + return err + } + } - return nil + return nil } func (self *Drive) updateChangedFiles(changedFiles []*changedFile, root *drive.File, args UploadSyncArgs) error { - changedCount := len(changedFiles) + changedCount := len(changedFiles) - if changedCount > 0 { - fmt.Fprintf(args.Out, "\n%d local files has changed\n", changedCount) - } + if changedCount > 0 { + fmt.Fprintf(args.Out, "\n%d local files has changed\n", changedCount) + } - for i, cf := range changedFiles { - if skip, reason := checkRemoteConflict(cf, args.Resolution); skip { - fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i + 1, changedCount, cf.local.relPath, reason) - continue - } + for i, cf := range changedFiles { + if skip, reason := checkRemoteConflict(cf, args.Resolution); skip { + fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i+1, changedCount, cf.local.relPath, reason) + continue + } - fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i + 1, changedCount, cf.local.relPath, filepath.Join(root.Name, cf.local.relPath)) + fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i+1, changedCount, cf.local.relPath, filepath.Join(root.Name, cf.local.relPath)) - err := self.updateChangedFile(cf, args, 0) - if err != nil { - return err - } - } + err := self.updateChangedFile(cf, args, 0) + if err != nil { + return err + } + } - return nil + return nil } func (self *Drive) deleteExtraneousRemoteFiles(files *syncFiles, args UploadSyncArgs) error { - extraneousFiles := files.filterExtraneousRemoteFiles() - extraneousCount := len(extraneousFiles) + extraneousFiles := files.filterExtraneousRemoteFiles() + extraneousCount := len(extraneousFiles) - if extraneousCount > 0 { - fmt.Fprintf(args.Out, "\n%d remote files are extraneous\n", extraneousCount) - } + if extraneousCount > 0 { + fmt.Fprintf(args.Out, "\n%d remote files are extraneous\n", extraneousCount) + } - // Sort files so that the files with the longest path comes first - sort.Sort(sort.Reverse(byRemotePathLength(extraneousFiles))) + // Sort files so that the files with the longest path comes first + sort.Sort(sort.Reverse(byRemotePathLength(extraneousFiles))) - for i, rf := range extraneousFiles { - fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, filepath.Join(files.root.file.Name, rf.relPath)) + for i, rf := range extraneousFiles { + fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i+1, extraneousCount, filepath.Join(files.root.file.Name, rf.relPath)) - err := self.deleteRemoteFile(rf, args, 0) - if err != nil { - return err - } - } + err := self.deleteRemoteFile(rf, args, 0) + if err != nil { + return err + } + } - return nil + return nil } 
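
createMissingRemoteDir just below, like uploadMissingFile, updateChangedFile and deleteRemoteFile, retries transient backend errors with an exponential backoff before giving up. A standalone sketch of that retry shape; maxRetries, isTransient and withRetry are stand-ins for the repo's MaxBackendErrorRetries, isBackendError and exponentialBackoffSleep, whose exact limits and delays may differ.

package main

import (
	"errors"
	"fmt"
	"time"
)

const maxRetries = 5

// isTransient is a stand-in; the real isBackendError inspects the
// googleapi error for a 5xx status code.
func isTransient(err error) bool { return err != nil }

// withRetry runs op, sleeping 1s, 2s, 4s, ... between failed attempts
// and giving up after maxRetries retries.
func withRetry(op func() error) error {
	for try := 0; ; try++ {
		err := op()
		if err == nil {
			return nil
		}
		if !isTransient(err) || try >= maxRetries {
			return fmt.Errorf("Giving up after %d attempts: %s", try+1, err)
		}
		time.Sleep(time.Duration(1<<uint(try)) * time.Second)
	}
}

func main() {
	attempts := 0
	err := withRetry(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("backend error")
		}
		return nil
	})
	fmt.Println("err:", err, "attempts:", attempts) // err: <nil> attempts: 3
}

Keeping the retry in one helper like this is a design choice; the diffed code instead passes a try counter into each recursive call, which achieves the same bounded backoff per operation.
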
func (self *Drive) createMissingRemoteDir(args createMissingRemoteDirArgs) (*drive.File, error) { - dstFile := &drive.File{ - Name: args.name, - MimeType: DirectoryMimeType, - Parents: []string{args.parentId}, - AppProperties: map[string]string{"sync": "true", "syncRootId": args.rootId}, - } - - if args.dryRun { - return dstFile, nil - } - - f, err := self.service.Files.Create(dstFile).Do() - if err != nil { - if isBackendError(err) && args.try < MaxBackendErrorRetries { - exponentialBackoffSleep(args.try) - args.try++ - return self.createMissingRemoteDir(args) - } else { - return nil, fmt.Errorf("Failed to create directory: %s", err) - } - } - - return f, nil + dstFile := &drive.File{ + Name: args.name, + MimeType: DirectoryMimeType, + Parents: []string{args.parentId}, + AppProperties: map[string]string{"sync": "true", "syncRootId": args.rootId}, + } + + if args.dryRun { + return dstFile, nil + } + + f, err := self.service.Files.Create(dstFile).Do() + if err != nil { + if isBackendError(err) && args.try < MaxBackendErrorRetries { + exponentialBackoffSleep(args.try) + args.try++ + return self.createMissingRemoteDir(args) + } else { + return nil, fmt.Errorf("Failed to create directory: %s", err) + } + } + + return f, nil } func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args UploadSyncArgs, try int) error { - if args.DryRun { - return nil - } - - srcFile, err := os.Open(lf.absPath) - if err != nil { - return fmt.Errorf("Failed to open file: %s", err) - } - - // Close file on function exit - defer srcFile.Close() - - // Instantiate drive file - dstFile := &drive.File{ - Name: lf.info.Name(), - Parents: []string{parentId}, - AppProperties: map[string]string{"sync": "true", "syncRootId": args.RootId}, - } - - // Chunk size option - chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - - // Wrap file in progress reader - progressReader := getProgressReader(srcFile, args.Progress, lf.info.Size()) - - // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) - - _, err = self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Context(ctx).Media(reader, chunkSize).Do() - if err != nil { - if isBackendError(err) && try < MaxBackendErrorRetries { - exponentialBackoffSleep(try) - try++ - return self.uploadMissingFile(parentId, lf, args, try) - } else { - return fmt.Errorf("Failed to upload file: %s", err) - } - } - - return nil + if args.DryRun { + return nil + } + + srcFile, err := os.Open(lf.absPath) + if err != nil { + return fmt.Errorf("Failed to open file: %s", err) + } + + // Close file on function exit + defer srcFile.Close() + + // Instantiate drive file + dstFile := &drive.File{ + Name: lf.info.Name(), + Parents: []string{parentId}, + AppProperties: map[string]string{"sync": "true", "syncRootId": args.RootId}, + } + + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + + // Wrap file in progress reader + progressReader := getProgressReader(srcFile, args.Progress, lf.info.Size()) + + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) + + _, err = self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Context(ctx).Media(reader, chunkSize).Do() + if err != nil { + if isBackendError(err) && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + return self.uploadMissingFile(parentId, lf, args, try) + } else { + return fmt.Errorf("Failed to upload file: %s", err) + } + } + + return nil } func (self *Drive) 
updateChangedFile(cf *changedFile, args UploadSyncArgs, try int) error { - if args.DryRun { - return nil - } - - srcFile, err := os.Open(cf.local.absPath) - if err != nil { - return fmt.Errorf("Failed to open file: %s", err) - } - - // Close file on function exit - defer srcFile.Close() - - // Instantiate drive file - dstFile := &drive.File{} - - // Chunk size option - chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - - // Wrap file in progress reader - progressReader := getProgressReader(srcFile, args.Progress, cf.local.info.Size()) - - // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) - - _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Context(ctx).Media(reader, chunkSize).Do() - if err != nil { - if isBackendError(err) && try < MaxBackendErrorRetries { - exponentialBackoffSleep(try) - try++ - return self.updateChangedFile(cf, args, try) - } else { - return fmt.Errorf("Failed to update file: %s", err) - } - } - - return nil + if args.DryRun { + return nil + } + + srcFile, err := os.Open(cf.local.absPath) + if err != nil { + return fmt.Errorf("Failed to open file: %s", err) + } + + // Close file on function exit + defer srcFile.Close() + + // Instantiate drive file + dstFile := &drive.File{} + + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + + // Wrap file in progress reader + progressReader := getProgressReader(srcFile, args.Progress, cf.local.info.Size()) + + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) + + _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Context(ctx).Media(reader, chunkSize).Do() + if err != nil { + if isBackendError(err) && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + return self.updateChangedFile(cf, args, try) + } else { + return fmt.Errorf("Failed to update file: %s", err) + } + } + + return nil } func (self *Drive) deleteRemoteFile(rf *RemoteFile, args UploadSyncArgs, try int) error { - if args.DryRun { - return nil - } - - - err := self.service.Files.Delete(rf.file.Id).Do() - if err != nil { - if isBackendError(err) && try < MaxBackendErrorRetries { - exponentialBackoffSleep(try) - try++ - return self.deleteRemoteFile(rf, args, try) - } else { - return fmt.Errorf("Failed to delete file: %s", err) - } - } - - return nil + if args.DryRun { + return nil + } + + err := self.service.Files.Delete(rf.file.Id).Do() + if err != nil { + if isBackendError(err) && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + return self.deleteRemoteFile(rf, args, try) + } else { + return fmt.Errorf("Failed to delete file: %s", err) + } + } + + return nil } func (self *Drive) dirIsEmpty(id string) (bool, error) { - query := fmt.Sprintf("'%s' in parents", id) - fileList, err := self.service.Files.List().Q(query).Do() - if err != nil { - return false, fmt.Errorf("Empty dir check failed: ", err) - } + query := fmt.Sprintf("'%s' in parents", id) + fileList, err := self.service.Files.List().Q(query).Do() + if err != nil { + return false, fmt.Errorf("Empty dir check failed: ", err) + } - return len(fileList.Files) == 0, nil + return len(fileList.Files) == 0, nil } func checkRemoteConflict(cf *changedFile, resolution ConflictResolution) (bool, string) { - // No conflict unless remote file was last modified - if cf.compareModTime() != RemoteLastModified { - return false, "" - } - - // Don't skip if want to keep the local file - if resolution == KeepLocal { - return false, "" - } - - // Skip if we 
want to keep the remote file - if resolution == KeepRemote { - return true, "conflicting file, keeping remote file" - } - - if resolution == KeepLargest { - largest := cf.compareSize() - - // Skip if the remote file is largest - if largest == RemoteLargestSize { - return true, "conflicting file, remote file is largest, keeping remote" - } - - // Don't skip if the local file is largest - if largest == LocalLargestSize { - return false, "" - } - - // Keep remote if both files have the same size - if largest == EqualSize { - return true, "conflicting file, file sizes are equal, keeping remote" - } - } - - // The conditionals above should cover all cases, - // unless the programmer did something wrong, - // in which case we default to being non-destructive and skip the file - return true, "conflicting file, unhandled case" + // No conflict unless remote file was last modified + if cf.compareModTime() != RemoteLastModified { + return false, "" + } + + // Don't skip if want to keep the local file + if resolution == KeepLocal { + return false, "" + } + + // Skip if we want to keep the remote file + if resolution == KeepRemote { + return true, "conflicting file, keeping remote file" + } + + if resolution == KeepLargest { + largest := cf.compareSize() + + // Skip if the remote file is largest + if largest == RemoteLargestSize { + return true, "conflicting file, remote file is largest, keeping remote" + } + + // Don't skip if the local file is largest + if largest == LocalLargestSize { + return false, "" + } + + // Keep remote if both files have the same size + if largest == EqualSize { + return true, "conflicting file, file sizes are equal, keeping remote" + } + } + + // The conditionals above should cover all cases, + // unless the programmer did something wrong, + // in which case we default to being non-destructive and skip the file + return true, "conflicting file, unhandled case" } func ensureNoRemoteModifications(files []*changedFile) error { - conflicts := findRemoteConflicts(files) - if len(conflicts) == 0 { - return nil - } - - buffer := bytes.NewBufferString("") - formatConflicts(conflicts, buffer) - return fmt.Errorf(buffer.String()) + conflicts := findRemoteConflicts(files) + if len(conflicts) == 0 { + return nil + } + + buffer := bytes.NewBufferString("") + formatConflicts(conflicts, buffer) + return fmt.Errorf(buffer.String()) } func (self *Drive) checkRemoteFreeSpace(missingFiles []*LocalFile, changedFiles []*changedFile) (bool, string) { - about, err := self.service.About.Get().Fields("storageQuota").Do() - if err != nil { - return false, fmt.Sprintf("Failed to determine free space: %s", err) - } + about, err := self.service.About.Get().Fields("storageQuota").Do() + if err != nil { + return false, fmt.Sprintf("Failed to determine free space: %s", err) + } - quota := about.StorageQuota - if quota.Limit == 0 { - return true, "" - } + quota := about.StorageQuota + if quota.Limit == 0 { + return true, "" + } - freeSpace := quota.Limit - quota.Usage + freeSpace := quota.Limit - quota.Usage - var totalSize int64 + var totalSize int64 - for _, lf := range missingFiles { - totalSize += lf.Size() - } + for _, lf := range missingFiles { + totalSize += lf.Size() + } - for _, cf := range changedFiles { - totalSize += cf.local.Size() - } + for _, cf := range changedFiles { + totalSize += cf.local.Size() + } - if totalSize > freeSpace { - return false, fmt.Sprintf("Not enough free space, have %s need %s", formatSize(freeSpace, false), formatSize(totalSize, false)) - } + if totalSize > freeSpace { 
+ return false, fmt.Sprintf("Not enough free space, have %s need %s", formatSize(freeSpace, false), formatSize(totalSize, false)) + } - return true, "" + return true, "" } diff --git a/drive/timeout_reader.go b/drive/timeout_reader.go index 878911b..9930c12 100644 --- a/drive/timeout_reader.go +++ b/drive/timeout_reader.go @@ -1,10 +1,10 @@ package drive import ( - "io" - "time" - "sync" - "golang.org/x/net/context" + "golang.org/x/net/context" + "io" + "sync" + "time" ) const MaxIdleTimeout = time.Second * 120 @@ -13,89 +13,89 @@ const TimeoutTimerInterval = time.Second * 10 type timeoutReaderWrapper func(io.Reader) io.Reader func getTimeoutReaderWrapperContext() (timeoutReaderWrapper, context.Context) { - ctx, cancel := context.WithCancel(context.TODO()) - wrapper := func(r io.Reader) io.Reader { - return getTimeoutReader(r, cancel) - } - return wrapper, ctx + ctx, cancel := context.WithCancel(context.TODO()) + wrapper := func(r io.Reader) io.Reader { + return getTimeoutReader(r, cancel) + } + return wrapper, ctx } func getTimeoutReaderContext(r io.Reader) (io.Reader, context.Context) { - ctx, cancel := context.WithCancel(context.TODO()) - return getTimeoutReader(r, cancel), ctx + ctx, cancel := context.WithCancel(context.TODO()) + return getTimeoutReader(r, cancel), ctx } func getTimeoutReader(r io.Reader, cancel context.CancelFunc) io.Reader { - return &TimeoutReader{ - reader: r, - cancel: cancel, - mutex: &sync.Mutex{}, - } + return &TimeoutReader{ + reader: r, + cancel: cancel, + mutex: &sync.Mutex{}, + } } type TimeoutReader struct { - reader io.Reader - cancel context.CancelFunc - lastActivity time.Time - timer *time.Timer - mutex *sync.Mutex - done bool + reader io.Reader + cancel context.CancelFunc + lastActivity time.Time + timer *time.Timer + mutex *sync.Mutex + done bool } func (self *TimeoutReader) Read(p []byte) (int, error) { - if self.timer == nil { - self.startTimer() - } + if self.timer == nil { + self.startTimer() + } - self.mutex.Lock() + self.mutex.Lock() - // Read - n, err := self.reader.Read(p) + // Read + n, err := self.reader.Read(p) - self.lastActivity = time.Now() - self.done = (err != nil) + self.lastActivity = time.Now() + self.done = (err != nil) - self.mutex.Unlock() + self.mutex.Unlock() - if self.done { - self.stopTimer() - } + if self.done { + self.stopTimer() + } - return n, err + return n, err } func (self *TimeoutReader) startTimer() { - self.mutex.Lock() - defer self.mutex.Unlock() + self.mutex.Lock() + defer self.mutex.Unlock() - if !self.done { - self.timer = time.AfterFunc(TimeoutTimerInterval, self.timeout) - } + if !self.done { + self.timer = time.AfterFunc(TimeoutTimerInterval, self.timeout) + } } func (self *TimeoutReader) stopTimer() { - self.mutex.Lock() - defer self.mutex.Unlock() + self.mutex.Lock() + defer self.mutex.Unlock() - if self.timer != nil { - self.timer.Stop() - } + if self.timer != nil { + self.timer.Stop() + } } func (self *TimeoutReader) timeout() { - self.mutex.Lock() + self.mutex.Lock() - if self.done { - self.mutex.Unlock() - return - } + if self.done { + self.mutex.Unlock() + return + } - if time.Since(self.lastActivity) > MaxIdleTimeout { - self.cancel() - self.mutex.Unlock() - return - } + if time.Since(self.lastActivity) > MaxIdleTimeout { + self.cancel() + self.mutex.Unlock() + return + } - self.mutex.Unlock() - self.startTimer() + self.mutex.Unlock() + self.startTimer() } diff --git a/drive/update.go b/drive/update.go index 5bdd040..156eb2f 100644 --- a/drive/update.go +++ b/drive/update.go @@ -1,75 +1,75 @@ package 
drive import ( - "fmt" - "mime" - "time" - "io" - "path/filepath" - "google.golang.org/api/googleapi" - "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "mime" + "path/filepath" + "time" ) type UpdateArgs struct { - Out io.Writer - Progress io.Writer - Id string - Path string - Name string - Parents []string - Mime string - Recursive bool - ChunkSize int64 + Out io.Writer + Progress io.Writer + Id string + Path string + Name string + Parents []string + Mime string + Recursive bool + ChunkSize int64 } func (self *Drive) Update(args UpdateArgs) error { - srcFile, srcFileInfo, err := openFile(args.Path) - if err != nil { - return fmt.Errorf("Failed to open file: %s", err) - } + srcFile, srcFileInfo, err := openFile(args.Path) + if err != nil { + return fmt.Errorf("Failed to open file: %s", err) + } - defer srcFile.Close() + defer srcFile.Close() - // Instantiate empty drive file - dstFile := &drive.File{} + // Instantiate empty drive file + dstFile := &drive.File{} - // Use provided file name or use filename - if args.Name == "" { - dstFile.Name = filepath.Base(srcFileInfo.Name()) - } else { - dstFile.Name = args.Name - } + // Use provided file name or use filename + if args.Name == "" { + dstFile.Name = filepath.Base(srcFileInfo.Name()) + } else { + dstFile.Name = args.Name + } - // Set provided mime type or get type based on file extension - if args.Mime == "" { - dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) - } else { - dstFile.MimeType = args.Mime - } + // Set provided mime type or get type based on file extension + if args.Mime == "" { + dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) + } else { + dstFile.MimeType = args.Mime + } - // Set parent folders - dstFile.Parents = args.Parents + // Set parent folders + dstFile.Parents = args.Parents - // Chunk size option - chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - // Wrap file in progress reader - progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) + // Wrap file in progress reader + progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) - // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) - fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) - started := time.Now() + fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) + started := time.Now() - f, err := self.service.Files.Update(args.Id, dstFile).Fields("id", "name", "size").Context(ctx).Media(reader, chunkSize).Do() - if err != nil { - return fmt.Errorf("Failed to upload file: %s", err) - } + f, err := self.service.Files.Update(args.Id, dstFile).Fields("id", "name", "size").Context(ctx).Media(reader, chunkSize).Do() + if err != nil { + return fmt.Errorf("Failed to upload file: %s", err) + } - // Calculate average upload rate - rate := calcRate(f.Size, started, time.Now()) + // Calculate average upload rate + rate := calcRate(f.Size, started, time.Now()) - fmt.Fprintf(args.Out, "Updated %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) - return nil + fmt.Fprintf(args.Out, "Updated %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) + return nil } diff --git a/drive/upload.go b/drive/upload.go index 0bbc014..c42bebd 100644 --- a/drive/upload.go +++ 
b/drive/upload.go @@ -1,249 +1,249 @@ package drive import ( - "fmt" - "mime" - "os" - "io" - "time" - "path/filepath" - "google.golang.org/api/googleapi" - "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "mime" + "os" + "path/filepath" + "time" ) type UploadArgs struct { - Out io.Writer - Progress io.Writer - Path string - Name string - Parents []string - Mime string - Recursive bool - Share bool - Delete bool - ChunkSize int64 + Out io.Writer + Progress io.Writer + Path string + Name string + Parents []string + Mime string + Recursive bool + Share bool + Delete bool + ChunkSize int64 } func (self *Drive) Upload(args UploadArgs) error { - if args.ChunkSize > intMax() - 1 { - return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) - } - - // Ensure that none of the parents are sync dirs - for _, parent := range args.Parents { - isSyncDir, err := self.isSyncFile(parent) - if err != nil { - return err - } - - if isSyncDir { - return fmt.Errorf("%s is a sync directory, use 'sync upload' instead", parent) - } - } - - if args.Recursive { - return self.uploadRecursive(args) - } - - info, err := os.Stat(args.Path) - if err != nil { - return fmt.Errorf("Failed stat file: %s", err) - } - - if info.IsDir() { - return fmt.Errorf("'%s' is a directory, use --recursive to upload directories", info.Name()) - } - - f, rate, err := self.uploadFile(args) - if err != nil { - return err - } - fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) - - if args.Share { - err = self.shareAnyoneReader(f.Id) - if err != nil { - return err - } - - fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink) - } - - if args.Delete { - err = os.Remove(args.Path) - if err != nil { - return fmt.Errorf("Failed to delete file: %s", err) - } - fmt.Fprintf(args.Out, "Removed %s\n", args.Path) - } - - return nil + if args.ChunkSize > intMax()-1 { + return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax()-1) + } + + // Ensure that none of the parents are sync dirs + for _, parent := range args.Parents { + isSyncDir, err := self.isSyncFile(parent) + if err != nil { + return err + } + + if isSyncDir { + return fmt.Errorf("%s is a sync directory, use 'sync upload' instead", parent) + } + } + + if args.Recursive { + return self.uploadRecursive(args) + } + + info, err := os.Stat(args.Path) + if err != nil { + return fmt.Errorf("Failed stat file: %s", err) + } + + if info.IsDir() { + return fmt.Errorf("'%s' is a directory, use --recursive to upload directories", info.Name()) + } + + f, rate, err := self.uploadFile(args) + if err != nil { + return err + } + fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) + + if args.Share { + err = self.shareAnyoneReader(f.Id) + if err != nil { + return err + } + + fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink) + } + + if args.Delete { + err = os.Remove(args.Path) + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } + fmt.Fprintf(args.Out, "Removed %s\n", args.Path) + } + + return nil } func (self *Drive) uploadRecursive(args UploadArgs) error { - info, err := os.Stat(args.Path) - if err != nil { - return fmt.Errorf("Failed stat file: %s", err) - } - - if info.IsDir() { - args.Name = "" - return self.uploadDirectory(args) - } else { - _, _, err := 
self.uploadFile(args) - return err - } + info, err := os.Stat(args.Path) + if err != nil { + return fmt.Errorf("Failed stat file: %s", err) + } + + if info.IsDir() { + args.Name = "" + return self.uploadDirectory(args) + } else { + _, _, err := self.uploadFile(args) + return err + } } func (self *Drive) uploadDirectory(args UploadArgs) error { - srcFile, srcFileInfo, err := openFile(args.Path) - if err != nil { - return err - } - - // Close file on function exit - defer srcFile.Close() - - fmt.Fprintf(args.Out, "Creating directory %s\n", srcFileInfo.Name()) - // Make directory on drive - f, err := self.mkdir(MkdirArgs{ - Out: args.Out, - Name: srcFileInfo.Name(), - Parents: args.Parents, - }) - if err != nil { - return err - } - - // Read files from directory - names, err := srcFile.Readdirnames(0) - if err != nil && err != io.EOF { - return fmt.Errorf("Failed reading directory: %s", err) - } - - for _, name := range names { - // Copy args and set new path and parents - newArgs := args - newArgs.Path = filepath.Join(args.Path, name) - newArgs.Parents = []string{f.Id} - - // Upload - err = self.uploadRecursive(newArgs) - if err != nil { - return err - } - } - - return nil + srcFile, srcFileInfo, err := openFile(args.Path) + if err != nil { + return err + } + + // Close file on function exit + defer srcFile.Close() + + fmt.Fprintf(args.Out, "Creating directory %s\n", srcFileInfo.Name()) + // Make directory on drive + f, err := self.mkdir(MkdirArgs{ + Out: args.Out, + Name: srcFileInfo.Name(), + Parents: args.Parents, + }) + if err != nil { + return err + } + + // Read files from directory + names, err := srcFile.Readdirnames(0) + if err != nil && err != io.EOF { + return fmt.Errorf("Failed reading directory: %s", err) + } + + for _, name := range names { + // Copy args and set new path and parents + newArgs := args + newArgs.Path = filepath.Join(args.Path, name) + newArgs.Parents = []string{f.Id} + + // Upload + err = self.uploadRecursive(newArgs) + if err != nil { + return err + } + } + + return nil } func (self *Drive) uploadFile(args UploadArgs) (*drive.File, int64, error) { - srcFile, srcFileInfo, err := openFile(args.Path) - if err != nil { - return nil, 0, err - } + srcFile, srcFileInfo, err := openFile(args.Path) + if err != nil { + return nil, 0, err + } - // Close file on function exit - defer srcFile.Close() + // Close file on function exit + defer srcFile.Close() - // Instantiate empty drive file - dstFile := &drive.File{} + // Instantiate empty drive file + dstFile := &drive.File{} - // Use provided file name or use filename - if args.Name == "" { - dstFile.Name = filepath.Base(srcFileInfo.Name()) - } else { - dstFile.Name = args.Name - } + // Use provided file name or use filename + if args.Name == "" { + dstFile.Name = filepath.Base(srcFileInfo.Name()) + } else { + dstFile.Name = args.Name + } - // Set provided mime type or get type based on file extension - if args.Mime == "" { - dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) - } else { - dstFile.MimeType = args.Mime - } + // Set provided mime type or get type based on file extension + if args.Mime == "" { + dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) + } else { + dstFile.MimeType = args.Mime + } - // Set parent folders - dstFile.Parents = args.Parents + // Set parent folders + dstFile.Parents = args.Parents - // Chunk size option - chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - // Wrap file in 
progress reader - progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) + // Wrap file in progress reader + progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) - // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) - fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) - started := time.Now() + fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) + started := time.Now() - f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum", "webContentLink").Context(ctx).Media(reader, chunkSize).Do() - if err != nil { - return nil, 0, fmt.Errorf("Failed to upload file: %s", err) - } + f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum", "webContentLink").Context(ctx).Media(reader, chunkSize).Do() + if err != nil { + return nil, 0, fmt.Errorf("Failed to upload file: %s", err) + } - // Calculate average upload rate - rate := calcRate(f.Size, started, time.Now()) + // Calculate average upload rate + rate := calcRate(f.Size, started, time.Now()) - return f, rate, nil + return f, rate, nil } type UploadStreamArgs struct { - Out io.Writer - In io.Reader - Name string - Parents []string - Mime string - Share bool - ChunkSize int64 - Progress io.Writer + Out io.Writer + In io.Reader + Name string + Parents []string + Mime string + Share bool + ChunkSize int64 + Progress io.Writer } func (self *Drive) UploadStream(args UploadStreamArgs) error { - if args.ChunkSize > intMax() - 1 { - return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) - } + if args.ChunkSize > intMax()-1 { + return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax()-1) + } - // Instantiate empty drive file - dstFile := &drive.File{Name: args.Name} + // Instantiate empty drive file + dstFile := &drive.File{Name: args.Name} - // Set mime type if provided - if args.Mime != "" { - dstFile.MimeType = args.Mime - } + // Set mime type if provided + if args.Mime != "" { + dstFile.MimeType = args.Mime + } - // Set parent folders - dstFile.Parents = args.Parents + // Set parent folders + dstFile.Parents = args.Parents - // Chunk size option - chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - // Wrap file in progress reader - progressReader := getProgressReader(args.In, args.Progress, 0) + // Wrap file in progress reader + progressReader := getProgressReader(args.In, args.Progress, 0) - // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) - fmt.Fprintf(args.Out, "Uploading %s\n", dstFile.Name) - started := time.Now() + fmt.Fprintf(args.Out, "Uploading %s\n", dstFile.Name) + started := time.Now() - f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "webContentLink").Context(ctx).Media(reader, chunkSize).Do() - if err != nil { - return fmt.Errorf("Failed to upload file: %s", err) - } + f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "webContentLink").Context(ctx).Media(reader, chunkSize).Do() + if err != nil { + return fmt.Errorf("Failed to upload file: %s", err) + } - // Calculate average upload rate - rate := calcRate(f.Size, started, time.Now()) + // Calculate average upload rate 
+ rate := calcRate(f.Size, started, time.Now()) - fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) - if args.Share { - err = self.shareAnyoneReader(f.Id) - if err != nil { - return err - } + fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) + if args.Share { + err = self.shareAnyoneReader(f.Id) + if err != nil { + return err + } - fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink) - } - return nil + fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink) + } + return nil } diff --git a/drive/util.go b/drive/util.go index 8891e12..181b9b9 100644 --- a/drive/util.go +++ b/drive/util.go @@ -1,169 +1,169 @@ package drive import ( - "os" - "fmt" - "path/filepath" - "strings" - "strconv" - "unicode/utf8" - "math" - "time" + "fmt" + "math" + "os" + "path/filepath" + "strconv" + "strings" + "time" + "unicode/utf8" ) type kv struct { - key string - value string + key string + value string } func formatList(a []string) string { - return strings.Join(a, ", ") + return strings.Join(a, ", ") } func formatSize(bytes int64, forceBytes bool) string { - if bytes == 0 { - return "" - } + if bytes == 0 { + return "" + } - if forceBytes { - return fmt.Sprintf("%v B", bytes) - } + if forceBytes { + return fmt.Sprintf("%v B", bytes) + } - units := []string{"B", "KB", "MB", "GB", "TB", "PB"} + units := []string{"B", "KB", "MB", "GB", "TB", "PB"} - var i int - value := float64(bytes) + var i int + value := float64(bytes) - for value > 1000 { - value /= 1000 - i++ - } - return fmt.Sprintf("%.1f %s", value, units[i]) + for value > 1000 { + value /= 1000 + i++ + } + return fmt.Sprintf("%.1f %s", value, units[i]) } func calcRate(bytes int64, start, end time.Time) int64 { - seconds := float64(end.Sub(start).Seconds()) - if seconds < 1.0 { - return bytes - } - return round(float64(bytes) / seconds) + seconds := float64(end.Sub(start).Seconds()) + if seconds < 1.0 { + return bytes + } + return round(float64(bytes) / seconds) } func round(n float64) int64 { - if n < 0 { - return int64(math.Ceil(n - 0.5)) - } - return int64(math.Floor(n + 0.5)) + if n < 0 { + return int64(math.Ceil(n - 0.5)) + } + return int64(math.Floor(n + 0.5)) } func formatBool(b bool) string { - return strings.Title(strconv.FormatBool(b)) + return strings.Title(strconv.FormatBool(b)) } func formatDatetime(iso string) string { - t, err := time.Parse(time.RFC3339, iso) - if err != nil { - return iso - } - local := t.Local() - year, month, day := local.Date() - hour, min, sec := local.Clock() - return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec) + t, err := time.Parse(time.RFC3339, iso) + if err != nil { + return iso + } + local := t.Local() + year, month, day := local.Date() + hour, min, sec := local.Clock() + return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec) } // Truncates string to given max length, and inserts ellipsis into // the middle of the string to signify that the string has been truncated func truncateString(str string, maxRunes int) string { - indicator := "..." + indicator := "..." 
- // Number of runes in string - runeCount := utf8.RuneCountInString(str) + // Number of runes in string + runeCount := utf8.RuneCountInString(str) - // Return input string if length of input string is less than max length - // Input string is also returned if max length is less than 9 which is the minmal supported length - if runeCount <= maxRunes || maxRunes < 9 { - return str - } + // Return input string if length of input string is less than max length + // Input string is also returned if max length is less than 9 which is the minmal supported length + if runeCount <= maxRunes || maxRunes < 9 { + return str + } - // Number of remaining runes to be removed - remaining := (runeCount - maxRunes) + utf8.RuneCountInString(indicator) + // Number of remaining runes to be removed + remaining := (runeCount - maxRunes) + utf8.RuneCountInString(indicator) - var truncated string - var skip bool + var truncated string + var skip bool - for leftOffset, char := range str { - rightOffset := runeCount - (leftOffset + remaining) + for leftOffset, char := range str { + rightOffset := runeCount - (leftOffset + remaining) - // Start skipping chars when the left and right offsets are equal - // Or in the case where we wont be able to do an even split: when the left offset is larger than the right offset - if leftOffset == rightOffset || (leftOffset > rightOffset && !skip) { - skip = true - truncated += indicator - } + // Start skipping chars when the left and right offsets are equal + // Or in the case where we wont be able to do an even split: when the left offset is larger than the right offset + if leftOffset == rightOffset || (leftOffset > rightOffset && !skip) { + skip = true + truncated += indicator + } - if skip && remaining > 0 { - // Skip char and decrement the remaining skip counter - remaining-- - continue - } + if skip && remaining > 0 { + // Skip char and decrement the remaining skip counter + remaining-- + continue + } - // Add char to result string - truncated += string(char) - } + // Add char to result string + truncated += string(char) + } - // Return truncated string - return truncated + // Return truncated string + return truncated } func fileExists(path string) bool { - _, err := os.Stat(path) - if err == nil { - return true - } - return false + _, err := os.Stat(path) + if err == nil { + return true + } + return false } func mkdir(path string) error { - dir := filepath.Dir(path) - if fileExists(dir) { - return nil - } - return os.MkdirAll(dir, 0775) + dir := filepath.Dir(path) + if fileExists(dir) { + return nil + } + return os.MkdirAll(dir, 0775) } func intMax() int64 { - return 1 << (strconv.IntSize - 1) - 1 + return 1<<(strconv.IntSize-1) - 1 } func pathLength(path string) int { - return strings.Count(path, string(os.PathSeparator)) + return strings.Count(path, string(os.PathSeparator)) } func parentFilePath(path string) string { - dir, _ := filepath.Split(path) - return filepath.Dir(dir) + dir, _ := filepath.Split(path) + return filepath.Dir(dir) } func pow(x int, y int) int { - f := math.Pow(float64(x), float64(y)) - return int(f) + f := math.Pow(float64(x), float64(y)) + return int(f) } func min(x int, y int) int { - n := math.Min(float64(x), float64(y)) - return int(n) + n := math.Min(float64(x), float64(y)) + return int(n) } func openFile(path string) (*os.File, os.FileInfo, error) { - f, err := os.Open(path) - if err != nil { - return nil, nil, fmt.Errorf("Failed to open file: %s", err) - } + f, err := os.Open(path) + if err != nil { + return nil, nil, fmt.Errorf("Failed to 
open file: %s", err) + } - info, err := f.Stat() - if err != nil { - return nil, nil, fmt.Errorf("Failed getting file metadata: %s", err) - } + info, err := f.Stat() + if err != nil { + return nil, nil, fmt.Errorf("Failed getting file metadata: %s", err) + } - return f, info, nil + return f, info, nil } |
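Editor's note: the transfer-rate and size figures printed by the upload and update commands above come from calcRate and formatSize in drive/util.go. The standalone sketch below re-implements those two helpers as they appear in this diff, purely to illustrate the decimal (power-of-1000) unit scaling and the bytes-per-second rounding; it is not the package's own code path, and the sample numbers are made up.

package main

import (
	"fmt"
	"math"
	"time"
)

// formatSize mirrors drive/util.go: decimal units, one decimal place,
// an empty string for zero bytes, and raw bytes when forceBytes is set.
func formatSize(bytes int64, forceBytes bool) string {
	if bytes == 0 {
		return ""
	}
	if forceBytes {
		return fmt.Sprintf("%v B", bytes)
	}
	units := []string{"B", "KB", "MB", "GB", "TB", "PB"}
	value := float64(bytes)
	var i int
	for value > 1000 {
		value /= 1000
		i++
	}
	return fmt.Sprintf("%.1f %s", value, units[i])
}

// calcRate mirrors drive/util.go: average bytes per second, rounded to the
// nearest integer, with sub-second transfers reported as the byte count.
func calcRate(bytes int64, start, end time.Time) int64 {
	seconds := end.Sub(start).Seconds()
	if seconds < 1.0 {
		return bytes
	}
	return int64(math.Floor(float64(bytes)/seconds + 0.5))
}

func main() {
	started := time.Now().Add(-4 * time.Second) // pretend the upload took ~4s
	uploaded := int64(5 * 1000 * 1000)          // 5 MB in decimal units

	rate := calcRate(uploaded, started, time.Now())
	fmt.Printf("Uploaded at %s/s, total %s\n", formatSize(rate, false), formatSize(uploaded, false))
	// Prints something close to: Uploaded at 1.2 MB/s, total 5.0 MB
}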
