author    Petter Rasmussen    2016-02-21 21:03:26 +0100
committer Petter Rasmussen    2016-02-21 21:03:26 +0100
commit    1973512dd8edca24df4124fb3dfac4a432a0d481 (patch)
tree      c61daefa5cf24eb2211ac816862697f9e0676d86
parent    701c7f1991ae765a51b0b7404d1edbb2dc523055 (diff)
download  gdrive-1973512dd8edca24df4124fb3dfac4a432a0d481.tar.bz2
go fmt
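
This commit only applies standard gofmt formatting: indentation is converted to tabs and import blocks are sorted, with no functional changes (note the near-identical insertion and deletion counts in the diffstat below). As a point of reference, here is a minimal sketch of applying the same normalization programmatically through the standard library; the input source is a made-up example, not part of this commit:

    package main

    import (
    	"fmt"
    	"go/format"
    )

    func main() {
    	// Unformatted input: spaces for indentation, imports out of order.
    	src := []byte("package demo\n\nimport (\n  \"os\"\n  \"fmt\"\n)\n\nfunc hi() { fmt.Fprintln(os.Stdout, \"hi\") }\n")

    	// format.Source applies the same canonical style as gofmt,
    	// including sorting of the import specs within a block.
    	out, err := format.Source(src)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(out))
    }
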
-rw-r--r--   auth/file_source.go            85
-rw-r--r--   auth/oauth.go                 106
-rw-r--r--   auth/util.go                   24
-rw-r--r--   cli/context.go                 17
-rw-r--r--   cli/flags.go                  180
-rw-r--r--   cli/handler.go                137
-rw-r--r--   cli/parser.go                 386
-rw-r--r--   compare.go                     76
-rw-r--r--   drive/about.go                 76
-rw-r--r--   drive/changes.go              158
-rw-r--r--   drive/delete.go                46
-rw-r--r--   drive/download.go             416
-rw-r--r--   drive/drive.go                 16
-rw-r--r--   drive/errors.go                18
-rw-r--r--   drive/export.go               172
-rw-r--r--   drive/import.go                80
-rw-r--r--   drive/info.go                  94
-rw-r--r--   drive/list.go                 206
-rw-r--r--   drive/mkdir.go                 42
-rw-r--r--   drive/path.go                  80
-rw-r--r--   drive/progress.go             148
-rw-r--r--   drive/revision_delete.go       36
-rw-r--r--   drive/revision_download.go     98
-rw-r--r--   drive/revision_list.go         92
-rw-r--r--   drive/share.go                152
-rw-r--r--   drive/sync.go                 836
-rw-r--r--   drive/sync_download.go        558
-rw-r--r--   drive/sync_list.go            150
-rw-r--r--   drive/sync_upload.go          801
-rw-r--r--   drive/timeout_reader.go       114
-rw-r--r--   drive/update.go               106
-rw-r--r--   drive/upload.go               402
-rw-r--r--   drive/util.go                 200
-rw-r--r--   gdrive.go                    1486
-rw-r--r--   handlers_drive.go             607
-rw-r--r--   handlers_meta.go              122
-rw-r--r--   util.go                       102
37 files changed, 4212 insertions, 4213 deletions
diff --git a/auth/file_source.go b/auth/file_source.go
index 1c1150b..5200203 100644
--- a/auth/file_source.go
+++ b/auth/file_source.go
@@ -1,68 +1,67 @@
package auth
import (
- "golang.org/x/oauth2"
- "encoding/json"
- "os"
- "io/ioutil"
+ "encoding/json"
+ "golang.org/x/oauth2"
+ "io/ioutil"
+ "os"
)
-
func FileSource(path string, token *oauth2.Token, conf *oauth2.Config) oauth2.TokenSource {
- return &fileSource{
- tokenPath: path,
- tokenSource: conf.TokenSource(oauth2.NoContext, token),
- }
+ return &fileSource{
+ tokenPath: path,
+ tokenSource: conf.TokenSource(oauth2.NoContext, token),
+ }
}
type fileSource struct {
- tokenPath string
- tokenSource oauth2.TokenSource
+ tokenPath string
+ tokenSource oauth2.TokenSource
}
func (self *fileSource) Token() (*oauth2.Token, error) {
- token, err := self.tokenSource.Token()
- if err != nil {
- return token, err
- }
+ token, err := self.tokenSource.Token()
+ if err != nil {
+ return token, err
+ }
- // Save token to file
- SaveToken(self.tokenPath, token)
+ // Save token to file
+ SaveToken(self.tokenPath, token)
- return token, nil
+ return token, nil
}
func ReadToken(path string) (*oauth2.Token, bool, error) {
- if !fileExists(path) {
- return nil, false, nil
- }
+ if !fileExists(path) {
+ return nil, false, nil
+ }
- content, err := ioutil.ReadFile(path)
- if err != nil {
- return nil, true, err
- }
- token := &oauth2.Token{}
- return token, true, json.Unmarshal(content, token)
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, true, err
+ }
+ token := &oauth2.Token{}
+ return token, true, json.Unmarshal(content, token)
}
func SaveToken(path string, token *oauth2.Token) error {
- data, err := json.MarshalIndent(token, "", " ")
- if err != nil {
- return err
- }
+ data, err := json.MarshalIndent(token, "", " ")
+ if err != nil {
+ return err
+ }
- if err = mkdir(path); err != nil {
- return err
- }
+ if err = mkdir(path); err != nil {
+ return err
+ }
- // Write to temp file first
- tmpFile := path + ".tmp"
- err = ioutil.WriteFile(tmpFile, data, 0600)
- if err != nil {
- os.Remove(tmpFile)
- return err
- }
+ // Write to temp file first
+ tmpFile := path + ".tmp"
+ err = ioutil.WriteFile(tmpFile, data, 0600)
+ if err != nil {
+ os.Remove(tmpFile)
+ return err
+ }
- // Move file to correct path
- return os.Rename(tmpFile, path)
+ // Move file to correct path
+ return os.Rename(tmpFile, path)
}
diff --git a/auth/oauth.go b/auth/oauth.go
index 965c7cc..150642c 100644
--- a/auth/oauth.go
+++ b/auth/oauth.go
@@ -1,78 +1,78 @@
package auth
import (
- "fmt"
- "time"
- "net/http"
- "golang.org/x/oauth2"
+ "fmt"
+ "golang.org/x/oauth2"
+ "net/http"
+ "time"
)
type authCodeFn func(string) func() string
func NewFileSourceClient(clientId, clientSecret, tokenFile string, authFn authCodeFn) (*http.Client, error) {
- conf := getConfig(clientId, clientSecret)
+ conf := getConfig(clientId, clientSecret)
- // Read cached token
- token, exists, err := ReadToken(tokenFile)
- if err != nil {
- return nil, fmt.Errorf("Failed to read token: %s", err)
- }
+ // Read cached token
+ token, exists, err := ReadToken(tokenFile)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to read token: %s", err)
+ }
- // Require auth code if token file does not exist
- // or refresh token is missing
- if !exists || token.RefreshToken == "" {
- authUrl := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
- authCode := authFn(authUrl)()
- token, err = conf.Exchange(oauth2.NoContext, authCode)
- if err != nil {
- return nil, fmt.Errorf("Failed to exchange auth code for token: %s", err)
- }
- }
+ // Require auth code if token file does not exist
+ // or refresh token is missing
+ if !exists || token.RefreshToken == "" {
+ authUrl := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
+ authCode := authFn(authUrl)()
+ token, err = conf.Exchange(oauth2.NoContext, authCode)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to exchange auth code for token: %s", err)
+ }
+ }
- return oauth2.NewClient(
- oauth2.NoContext,
- FileSource(tokenFile, token, conf),
- ), nil
+ return oauth2.NewClient(
+ oauth2.NoContext,
+ FileSource(tokenFile, token, conf),
+ ), nil
}
func NewRefreshTokenClient(clientId, clientSecret, refreshToken string) *http.Client {
- conf := getConfig(clientId, clientSecret)
+ conf := getConfig(clientId, clientSecret)
- token := &oauth2.Token{
- TokenType: "Bearer",
- RefreshToken: refreshToken,
- Expiry: time.Now(),
- }
+ token := &oauth2.Token{
+ TokenType: "Bearer",
+ RefreshToken: refreshToken,
+ Expiry: time.Now(),
+ }
- return oauth2.NewClient(
- oauth2.NoContext,
- conf.TokenSource(oauth2.NoContext, token),
- )
+ return oauth2.NewClient(
+ oauth2.NoContext,
+ conf.TokenSource(oauth2.NoContext, token),
+ )
}
func NewAccessTokenClient(clientId, clientSecret, accessToken string) *http.Client {
- conf := getConfig(clientId, clientSecret)
+ conf := getConfig(clientId, clientSecret)
- token := &oauth2.Token{
- TokenType: "Bearer",
- AccessToken: accessToken,
- }
+ token := &oauth2.Token{
+ TokenType: "Bearer",
+ AccessToken: accessToken,
+ }
- return oauth2.NewClient(
- oauth2.NoContext,
- conf.TokenSource(oauth2.NoContext, token),
- )
+ return oauth2.NewClient(
+ oauth2.NoContext,
+ conf.TokenSource(oauth2.NoContext, token),
+ )
}
func getConfig(clientId, clientSecret string) *oauth2.Config {
- return &oauth2.Config{
- ClientID: clientId,
- ClientSecret: clientSecret,
- Scopes: []string{"https://www.googleapis.com/auth/drive"},
- RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
- Endpoint: oauth2.Endpoint{
- AuthURL: "https://accounts.google.com/o/oauth2/auth",
- TokenURL: "https://accounts.google.com/o/oauth2/token",
- },
- }
+ return &oauth2.Config{
+ ClientID: clientId,
+ ClientSecret: clientSecret,
+ Scopes: []string{"https://www.googleapis.com/auth/drive"},
+ RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
+ Endpoint: oauth2.Endpoint{
+ AuthURL: "https://accounts.google.com/o/oauth2/auth",
+ TokenURL: "https://accounts.google.com/o/oauth2/token",
+ },
+ }
}
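
The three client constructors above are the package's public entry points. A minimal usage sketch (hypothetical client id, secret and token path; the prompt helper and the relative import are assumptions mirroring the project's own style at the time):

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"strings"

    	"./auth"
    )

    // promptAuthCode satisfies the authCodeFn parameter of NewFileSourceClient:
    // it receives the auth URL and returns a function that obtains the code.
    func promptAuthCode(url string) func() string {
    	return func() string {
    		fmt.Printf("Open %s in a browser and paste the code here: ", url)
    		code, _ := bufio.NewReader(os.Stdin).ReadString('\n')
    		return strings.TrimSpace(code)
    	}
    }

    func main() {
    	// Hypothetical credentials and token path -- replace with real values.
    	client, err := auth.NewFileSourceClient("client-id", "client-secret", "token.json", promptAuthCode)
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	_ = client // hand the client to drive.New, see drive/drive.go further down
    }
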
diff --git a/auth/util.go b/auth/util.go
index b053c1f..dfa4adf 100644
--- a/auth/util.go
+++ b/auth/util.go
@@ -1,22 +1,22 @@
package auth
import (
- "os"
- "path/filepath"
+ "os"
+ "path/filepath"
)
func mkdir(path string) error {
- dir := filepath.Dir(path)
- if fileExists(dir) {
- return nil
- }
- return os.Mkdir(dir, 0700)
+ dir := filepath.Dir(path)
+ if fileExists(dir) {
+ return nil
+ }
+ return os.Mkdir(dir, 0700)
}
func fileExists(path string) bool {
- _, err := os.Stat(path)
- if err == nil {
- return true
- }
- return false
+ _, err := os.Stat(path)
+ if err == nil {
+ return true
+ }
+ return false
}
diff --git a/cli/context.go b/cli/context.go
index ce82b17..cafb03f 100644
--- a/cli/context.go
+++ b/cli/context.go
@@ -1,33 +1,32 @@
package cli
-
type Context struct {
- args Arguments
- handlers []*Handler
+ args Arguments
+ handlers []*Handler
}
func (self Context) Args() Arguments {
- return self.args
+ return self.args
}
func (self Context) Handlers() []*Handler {
- return self.handlers
+ return self.handlers
}
type Arguments map[string]interface{}
func (self Arguments) String(key string) string {
- return self[key].(string)
+ return self[key].(string)
}
func (self Arguments) Int64(key string) int64 {
- return self[key].(int64)
+ return self[key].(int64)
}
func (self Arguments) Bool(key string) bool {
- return self[key].(bool)
+ return self[key].(bool)
}
func (self Arguments) StringSlice(key string) []string {
- return self[key].([]string)
+ return self[key].([]string)
}
diff --git a/cli/flags.go b/cli/flags.go
index 6c82ed7..61ecfb4 100644
--- a/cli/flags.go
+++ b/cli/flags.go
@@ -1,162 +1,160 @@
package cli
type Flag interface {
- GetPatterns() []string
- GetName() string
- GetDescription() string
- GetParser() Parser
+ GetPatterns() []string
+ GetName() string
+ GetDescription() string
+ GetParser() Parser
}
func getFlagParser(flags []Flag) Parser {
- var parsers []Parser
+ var parsers []Parser
- for _, flag := range flags {
- parsers = append(parsers, flag.GetParser())
- }
+ for _, flag := range flags {
+ parsers = append(parsers, flag.GetParser())
+ }
- return FlagParser{parsers}
+ return FlagParser{parsers}
}
-
type BoolFlag struct {
- Patterns []string
- Name string
- Description string
- DefaultValue bool
- OmitValue bool
+ Patterns []string
+ Name string
+ Description string
+ DefaultValue bool
+ OmitValue bool
}
func (self BoolFlag) GetName() string {
- return self.Name
+ return self.Name
}
func (self BoolFlag) GetPatterns() []string {
- return self.Patterns
+ return self.Patterns
}
func (self BoolFlag) GetDescription() string {
- return self.Description
+ return self.Description
}
func (self BoolFlag) GetParser() Parser {
- var parsers []Parser
- for _, p := range self.Patterns {
- parsers = append(parsers, BoolFlagParser{
- pattern: p,
- key: self.Name,
- omitValue: self.OmitValue,
- defaultValue: self.DefaultValue,
- })
- }
-
- if len(parsers) == 1 {
- return parsers[0]
- }
- return ShortCircuitParser{parsers}
+ var parsers []Parser
+ for _, p := range self.Patterns {
+ parsers = append(parsers, BoolFlagParser{
+ pattern: p,
+ key: self.Name,
+ omitValue: self.OmitValue,
+ defaultValue: self.DefaultValue,
+ })
+ }
+
+ if len(parsers) == 1 {
+ return parsers[0]
+ }
+ return ShortCircuitParser{parsers}
}
-
type StringFlag struct {
- Patterns []string
- Name string
- Description string
- DefaultValue string
+ Patterns []string
+ Name string
+ Description string
+ DefaultValue string
}
func (self StringFlag) GetName() string {
- return self.Name
+ return self.Name
}
func (self StringFlag) GetPatterns() []string {
- return self.Patterns
+ return self.Patterns
}
func (self StringFlag) GetDescription() string {
- return self.Description
+ return self.Description
}
func (self StringFlag) GetParser() Parser {
- var parsers []Parser
- for _, p := range self.Patterns {
- parsers = append(parsers, StringFlagParser{
- pattern: p,
- key: self.Name,
- defaultValue: self.DefaultValue,
- })
- }
+ var parsers []Parser
+ for _, p := range self.Patterns {
+ parsers = append(parsers, StringFlagParser{
+ pattern: p,
+ key: self.Name,
+ defaultValue: self.DefaultValue,
+ })
+ }
- if len(parsers) == 1 {
- return parsers[0]
- }
- return ShortCircuitParser{parsers}
+ if len(parsers) == 1 {
+ return parsers[0]
+ }
+ return ShortCircuitParser{parsers}
}
type IntFlag struct {
- Patterns []string
- Name string
- Description string
- DefaultValue int64
+ Patterns []string
+ Name string
+ Description string
+ DefaultValue int64
}
func (self IntFlag) GetName() string {
- return self.Name
+ return self.Name
}
func (self IntFlag) GetPatterns() []string {
- return self.Patterns
+ return self.Patterns
}
func (self IntFlag) GetDescription() string {
- return self.Description
+ return self.Description
}
func (self IntFlag) GetParser() Parser {
- var parsers []Parser
- for _, p := range self.Patterns {
- parsers = append(parsers, IntFlagParser{
- pattern: p,
- key: self.Name,
- defaultValue: self.DefaultValue,
- })
- }
+ var parsers []Parser
+ for _, p := range self.Patterns {
+ parsers = append(parsers, IntFlagParser{
+ pattern: p,
+ key: self.Name,
+ defaultValue: self.DefaultValue,
+ })
+ }
- if len(parsers) == 1 {
- return parsers[0]
- }
- return ShortCircuitParser{parsers}
+ if len(parsers) == 1 {
+ return parsers[0]
+ }
+ return ShortCircuitParser{parsers}
}
type StringSliceFlag struct {
- Patterns []string
- Name string
- Description string
- DefaultValue []string
+ Patterns []string
+ Name string
+ Description string
+ DefaultValue []string
}
func (self StringSliceFlag) GetName() string {
- return self.Name
+ return self.Name
}
func (self StringSliceFlag) GetPatterns() []string {
- return self.Patterns
+ return self.Patterns
}
func (self StringSliceFlag) GetDescription() string {
- return self.Description
+ return self.Description
}
func (self StringSliceFlag) GetParser() Parser {
- var parsers []Parser
- for _, p := range self.Patterns {
- parsers = append(parsers, StringSliceFlagParser{
- pattern: p,
- key: self.Name,
- defaultValue: self.DefaultValue,
- })
- }
-
- if len(parsers) == 1 {
- return parsers[0]
- }
- return ShortCircuitParser{parsers}
+ var parsers []Parser
+ for _, p := range self.Patterns {
+ parsers = append(parsers, StringSliceFlagParser{
+ pattern: p,
+ key: self.Name,
+ defaultValue: self.DefaultValue,
+ })
+ }
+
+ if len(parsers) == 1 {
+ return parsers[0]
+ }
+ return ShortCircuitParser{parsers}
}
diff --git a/cli/handler.go b/cli/handler.go
index a1a7257..3c53e7e 100644
--- a/cli/handler.go
+++ b/cli/handler.go
@@ -1,119 +1,118 @@
package cli
import (
- "regexp"
- "strings"
+ "regexp"
+ "strings"
)
-func NewFlagGroup(name string, flags...Flag) FlagGroup {
- return FlagGroup{
- Name: name,
- Flags: flags,
- }
+func NewFlagGroup(name string, flags ...Flag) FlagGroup {
+ return FlagGroup{
+ Name: name,
+ Flags: flags,
+ }
}
type FlagGroup struct {
- Name string
- Flags []Flag
+ Name string
+ Flags []Flag
}
type FlagGroups []FlagGroup
func (groups FlagGroups) getFlags(name string) []Flag {
- for _, group := range groups {
- if group.Name == name {
- return group.Flags
- }
- }
+ for _, group := range groups {
+ if group.Name == name {
+ return group.Flags
+ }
+ }
- return nil
+ return nil
}
var handlers []*Handler
type Handler struct {
- Pattern string
- FlagGroups FlagGroups
- Callback func(Context)
- Description string
+ Pattern string
+ FlagGroups FlagGroups
+ Callback func(Context)
+ Description string
}
func (self *Handler) getParser() Parser {
- var parsers []Parser
-
- for _, pattern := range self.SplitPattern() {
- if isFlagGroup(pattern) {
- groupName := flagGroupName(pattern)
- flags := self.FlagGroups.getFlags(groupName)
- parsers = append(parsers, getFlagParser(flags))
- } else if isCaptureGroup(pattern) {
- parsers = append(parsers, CaptureGroupParser{pattern})
- } else {
- parsers = append(parsers, EqualParser{pattern})
- }
- }
-
- return CompleteParser{parsers}
+ var parsers []Parser
+
+ for _, pattern := range self.SplitPattern() {
+ if isFlagGroup(pattern) {
+ groupName := flagGroupName(pattern)
+ flags := self.FlagGroups.getFlags(groupName)
+ parsers = append(parsers, getFlagParser(flags))
+ } else if isCaptureGroup(pattern) {
+ parsers = append(parsers, CaptureGroupParser{pattern})
+ } else {
+ parsers = append(parsers, EqualParser{pattern})
+ }
+ }
+
+ return CompleteParser{parsers}
}
// Split on spaces but ignore spaces inside <...> and [...]
func (self *Handler) SplitPattern() []string {
- re := regexp.MustCompile(`(<[^>]+>|\[[^\]]+]|\S+)`)
- matches := []string{}
+ re := regexp.MustCompile(`(<[^>]+>|\[[^\]]+]|\S+)`)
+ matches := []string{}
- for _, value := range re.FindAllStringSubmatch(self.Pattern, -1) {
- matches = append(matches, value[1])
- }
+ for _, value := range re.FindAllStringSubmatch(self.Pattern, -1) {
+ matches = append(matches, value[1])
+ }
- return matches
+ return matches
}
func SetHandlers(h []*Handler) {
- handlers = h
+ handlers = h
}
func AddHandler(pattern string, groups FlagGroups, callback func(Context), desc string) {
- handlers = append(handlers, &Handler{
- Pattern: pattern,
- FlagGroups: groups,
- Callback: callback,
- Description: desc,
- })
+ handlers = append(handlers, &Handler{
+ Pattern: pattern,
+ FlagGroups: groups,
+ Callback: callback,
+ Description: desc,
+ })
}
func findHandler(args []string) *Handler {
- for _, h := range handlers {
- if _, ok := h.getParser().Match(args); ok {
- return h
- }
- }
- return nil
+ for _, h := range handlers {
+ if _, ok := h.getParser().Match(args); ok {
+ return h
+ }
+ }
+ return nil
}
-
func Handle(args []string) bool {
- h := findHandler(args)
- if h == nil {
- return false
- }
-
- _, data := h.getParser().Capture(args)
- ctx := Context{
- args: data,
- handlers: handlers,
- }
- h.Callback(ctx)
- return true
+ h := findHandler(args)
+ if h == nil {
+ return false
+ }
+
+ _, data := h.getParser().Capture(args)
+ ctx := Context{
+ args: data,
+ handlers: handlers,
+ }
+ h.Callback(ctx)
+ return true
}
func isCaptureGroup(arg string) bool {
- return strings.HasPrefix(arg, "<") && strings.HasSuffix(arg, ">")
+ return strings.HasPrefix(arg, "<") && strings.HasSuffix(arg, ">")
}
func isFlagGroup(arg string) bool {
- return strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]")
+ return strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]")
}
func flagGroupName(s string) string {
- return s[1:len(s) - 1]
+ return s[1 : len(s)-1]
}
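
For context, a minimal sketch of how the handler API above is meant to be wired up (hypothetical "greet" command and --verbose flag, not part of gdrive): Handle matches the arguments against the registered patterns and invokes the matching callback with the captured values.

    package main

    import (
    	"fmt"
    	"os"

    	"./cli"
    )

    func main() {
    	flags := cli.FlagGroups{cli.NewFlagGroup("options",
    		cli.BoolFlag{Patterns: []string{"--verbose"}, Name: "verbose", OmitValue: true},
    	)}

    	// "greet" must match literally, <name> is a capture group,
    	// [options] pulls in the flag group defined above.
    	cli.AddHandler("greet <name> [options]", flags, func(ctx cli.Context) {
    		fmt.Printf("hello %s (verbose=%t)\n", ctx.Args().String("name"), ctx.Args().Bool("verbose"))
    	}, "Greet someone")

    	if !cli.Handle(os.Args[1:]) {
    		fmt.Fprintln(os.Stderr, "no matching command")
    		os.Exit(1)
    	}
    }
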
diff --git a/cli/parser.go b/cli/parser.go
index 5fbbe3f..e1b5bc1 100644
--- a/cli/parser.go
+++ b/cli/parser.go
@@ -1,357 +1,351 @@
package cli
import (
- "fmt"
- "strconv"
+ "fmt"
+ "strconv"
)
type Parser interface {
- Match([]string) ([]string, bool)
- Capture([]string) ([]string, map[string]interface{})
+ Match([]string) ([]string, bool)
+ Capture([]string) ([]string, map[string]interface{})
}
type EqualParser struct {
- value string
+ value string
}
func (self EqualParser) Match(values []string) ([]string, bool) {
- if len(values) == 0 {
- return values, false
- }
+ if len(values) == 0 {
+ return values, false
+ }
- if self.value == values[0] {
- return values[1:], true
- }
+ if self.value == values[0] {
+ return values[1:], true
+ }
- return values, false
+ return values, false
}
func (self EqualParser) Capture(values []string) ([]string, map[string]interface{}) {
- remainingValues, _ := self.Match(values)
- return remainingValues, nil
+ remainingValues, _ := self.Match(values)
+ return remainingValues, nil
}
func (self EqualParser) String() string {
- return fmt.Sprintf("EqualParser '%s'", self.value)
+ return fmt.Sprintf("EqualParser '%s'", self.value)
}
-
type CaptureGroupParser struct {
- value string
+ value string
}
func (self CaptureGroupParser) Match(values []string) ([]string, bool) {
- if len(values) == 0 {
- return values, false
- }
+ if len(values) == 0 {
+ return values, false
+ }
- return values[1:], true
+ return values[1:], true
}
func (self CaptureGroupParser) key() string {
- return self.value[1:len(self.value) - 1]
+ return self.value[1 : len(self.value)-1]
}
func (self CaptureGroupParser) Capture(values []string) ([]string, map[string]interface{}) {
- if remainingValues, ok := self.Match(values); ok {
- return remainingValues, map[string]interface{}{self.key(): values[0]}
- }
+ if remainingValues, ok := self.Match(values); ok {
+ return remainingValues, map[string]interface{}{self.key(): values[0]}
+ }
- return values, nil
+ return values, nil
}
func (self CaptureGroupParser) String() string {
- return fmt.Sprintf("CaptureGroupParser '%s'", self.value)
+ return fmt.Sprintf("CaptureGroupParser '%s'", self.value)
}
-
-
type BoolFlagParser struct {
- pattern string
- key string
- omitValue bool
- defaultValue bool
+ pattern string
+ key string
+ omitValue bool
+ defaultValue bool
}
func (self BoolFlagParser) Match(values []string) ([]string, bool) {
- if self.omitValue {
- return flagKeyMatch(self.pattern, values, 0)
- }
+ if self.omitValue {
+ return flagKeyMatch(self.pattern, values, 0)
+ }
- remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0)
- if !ok {
- return remaining, false
- }
+ remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0)
+ if !ok {
+ return remaining, false
+ }
- // Check that value is a valid boolean
- if _, err := strconv.ParseBool(value); err != nil {
- return remaining, false
- }
+ // Check that value is a valid boolean
+ if _, err := strconv.ParseBool(value); err != nil {
+ return remaining, false
+ }
- return remaining, true
+ return remaining, true
}
func (self BoolFlagParser) Capture(values []string) ([]string, map[string]interface{}) {
- if self.omitValue {
- remaining, ok := flagKeyMatch(self.pattern, values, 0)
- return remaining, map[string]interface{}{self.key: ok}
- }
+ if self.omitValue {
+ remaining, ok := flagKeyMatch(self.pattern, values, 0)
+ return remaining, map[string]interface{}{self.key: ok}
+ }
- remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0)
- if !ok {
- return remaining, map[string]interface{}{self.key: self.defaultValue}
- }
+ remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0)
+ if !ok {
+ return remaining, map[string]interface{}{self.key: self.defaultValue}
+ }
- b, _ := strconv.ParseBool(value)
- return remaining, map[string]interface{}{self.key: b}
+ b, _ := strconv.ParseBool(value)
+ return remaining, map[string]interface{}{self.key: b}
}
func (self BoolFlagParser) String() string {
- return fmt.Sprintf("BoolFlagParser '%s'", self.pattern)
+ return fmt.Sprintf("BoolFlagParser '%s'", self.pattern)
}
type StringFlagParser struct {
- pattern string
- key string
- defaultValue string
+ pattern string
+ key string
+ defaultValue string
}
func (self StringFlagParser) Match(values []string) ([]string, bool) {
- remaining, _, ok := flagKeyValueMatch(self.pattern, values, 0)
- return remaining, ok
+ remaining, _, ok := flagKeyValueMatch(self.pattern, values, 0)
+ return remaining, ok
}
func (self StringFlagParser) Capture(values []string) ([]string, map[string]interface{}) {
- remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0)
- if !ok {
- return remaining, map[string]interface{}{self.key: self.defaultValue}
- }
+ remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0)
+ if !ok {
+ return remaining, map[string]interface{}{self.key: self.defaultValue}
+ }
- return remaining, map[string]interface{}{self.key: value}
+ return remaining, map[string]interface{}{self.key: value}
}
func (self StringFlagParser) String() string {
- return fmt.Sprintf("StringFlagParser '%s'", self.pattern)
+ return fmt.Sprintf("StringFlagParser '%s'", self.pattern)
}
type IntFlagParser struct {
- pattern string
- key string
- defaultValue int64
+ pattern string
+ key string
+ defaultValue int64
}
func (self IntFlagParser) Match(values []string) ([]string, bool) {
- remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0)
- if !ok {
- return remaining, false
- }
+ remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0)
+ if !ok {
+ return remaining, false
+ }
- // Check that value is a valid integer
- if _, err := strconv.ParseInt(value, 10, 64); err != nil {
- return remaining, false
- }
+ // Check that value is a valid integer
+ if _, err := strconv.ParseInt(value, 10, 64); err != nil {
+ return remaining, false
+ }
- return remaining, true
+ return remaining, true
}
func (self IntFlagParser) Capture(values []string) ([]string, map[string]interface{}) {
- remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0)
- if !ok {
- return remaining, map[string]interface{}{self.key: self.defaultValue}
- }
+ remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0)
+ if !ok {
+ return remaining, map[string]interface{}{self.key: self.defaultValue}
+ }
- n, _ := strconv.ParseInt(value, 10, 64)
- return remaining, map[string]interface{}{self.key: n}
+ n, _ := strconv.ParseInt(value, 10, 64)
+ return remaining, map[string]interface{}{self.key: n}
}
func (self IntFlagParser) String() string {
- return fmt.Sprintf("IntFlagParser '%s'", self.pattern)
+ return fmt.Sprintf("IntFlagParser '%s'", self.pattern)
}
-
type StringSliceFlagParser struct {
- pattern string
- key string
- defaultValue []string
+ pattern string
+ key string
+ defaultValue []string
}
func (self StringSliceFlagParser) Match(values []string) ([]string, bool) {
- if len(values) < 2 {
- return values, false
- }
+ if len(values) < 2 {
+ return values, false
+ }
- var remainingValues []string
+ var remainingValues []string
- for i := 0; i < len(values); i++ {
- if values[i] == self.pattern && i + 1 < len(values) {
- i++
- continue
- }
- remainingValues = append(remainingValues, values[i])
- }
+ for i := 0; i < len(values); i++ {
+ if values[i] == self.pattern && i+1 < len(values) {
+ i++
+ continue
+ }
+ remainingValues = append(remainingValues, values[i])
+ }
- return remainingValues, len(values) != len(remainingValues)
+ return remainingValues, len(values) != len(remainingValues)
}
func (self StringSliceFlagParser) Capture(values []string) ([]string, map[string]interface{}) {
- remainingValues, ok := self.Match(values)
- if !ok {
- return values, map[string]interface{}{self.key: self.defaultValue}
- }
+ remainingValues, ok := self.Match(values)
+ if !ok {
+ return values, map[string]interface{}{self.key: self.defaultValue}
+ }
- var captured []string
+ var captured []string
- for i := 0; i < len(values); i++ {
- if values[i] == self.pattern && i + 1 < len(values) {
- captured = append(captured, values[i + 1])
- }
- }
+ for i := 0; i < len(values); i++ {
+ if values[i] == self.pattern && i+1 < len(values) {
+ captured = append(captured, values[i+1])
+ }
+ }
- return remainingValues, map[string]interface{}{self.key: captured}
+ return remainingValues, map[string]interface{}{self.key: captured}
}
func (self StringSliceFlagParser) String() string {
- return fmt.Sprintf("StringSliceFlagParser '%s'", self.pattern)
+ return fmt.Sprintf("StringSliceFlagParser '%s'", self.pattern)
}
-
type FlagParser struct {
- parsers []Parser
+ parsers []Parser
}
func (self FlagParser) Match(values []string) ([]string, bool) {
- remainingValues := values
+ remainingValues := values
- for _, parser := range self.parsers {
- remainingValues, _ = parser.Match(remainingValues)
- }
- return remainingValues, true
+ for _, parser := range self.parsers {
+ remainingValues, _ = parser.Match(remainingValues)
+ }
+ return remainingValues, true
}
func (self FlagParser) Capture(values []string) ([]string, map[string]interface{}) {
- captured := map[string]interface{}{}
- remainingValues := values
+ captured := map[string]interface{}{}
+ remainingValues := values
- for _, parser := range self.parsers {
- var data map[string]interface{}
- remainingValues, data = parser.Capture(remainingValues)
- for key, value := range data {
- captured[key] = value
- }
- }
+ for _, parser := range self.parsers {
+ var data map[string]interface{}
+ remainingValues, data = parser.Capture(remainingValues)
+ for key, value := range data {
+ captured[key] = value
+ }
+ }
- return remainingValues, captured
+ return remainingValues, captured
}
func (self FlagParser) String() string {
- return fmt.Sprintf("FlagParser %v", self.parsers)
+ return fmt.Sprintf("FlagParser %v", self.parsers)
}
-
type ShortCircuitParser struct {
- parsers []Parser
+ parsers []Parser
}
func (self ShortCircuitParser) Match(values []string) ([]string, bool) {
- remainingValues := values
+ remainingValues := values
- for _, parser := range self.parsers {
- var ok bool
- remainingValues, ok = parser.Match(remainingValues)
- if ok {
- return remainingValues, true
- }
- }
+ for _, parser := range self.parsers {
+ var ok bool
+ remainingValues, ok = parser.Match(remainingValues)
+ if ok {
+ return remainingValues, true
+ }
+ }
- return remainingValues, false
+ return remainingValues, false
}
func (self ShortCircuitParser) Capture(values []string) ([]string, map[string]interface{}) {
- if len(self.parsers) == 0 {
- return values, nil
- }
+ if len(self.parsers) == 0 {
+ return values, nil
+ }
- for _, parser := range self.parsers {
- if _, ok := parser.Match(values); ok {
- return parser.Capture(values)
- }
- }
+ for _, parser := range self.parsers {
+ if _, ok := parser.Match(values); ok {
+ return parser.Capture(values)
+ }
+ }
- // No parsers matched at this point,
- // just return the capture value of the first one
- return self.parsers[0].Capture(values)
+ // No parsers matched at this point,
+ // just return the capture value of the first one
+ return self.parsers[0].Capture(values)
}
func (self ShortCircuitParser) String() string {
- return fmt.Sprintf("ShortCircuitParser %v", self.parsers)
+ return fmt.Sprintf("ShortCircuitParser %v", self.parsers)
}
type CompleteParser struct {
- parsers []Parser
+ parsers []Parser
}
func (self CompleteParser) Match(values []string) ([]string, bool) {
- remainingValues := copySlice(values)
+ remainingValues := copySlice(values)
- for _, parser := range self.parsers {
- var ok bool
- remainingValues, ok = parser.Match(remainingValues)
- if !ok {
- return remainingValues, false
- }
- }
+ for _, parser := range self.parsers {
+ var ok bool
+ remainingValues, ok = parser.Match(remainingValues)
+ if !ok {
+ return remainingValues, false
+ }
+ }
- return remainingValues, len(remainingValues) == 0
+ return remainingValues, len(remainingValues) == 0
}
func (self CompleteParser) Capture(values []string) ([]string, map[string]interface{}) {
- remainingValues := copySlice(values)
- data := map[string]interface{}{}
+ remainingValues := copySlice(values)
+ data := map[string]interface{}{}
- for _, parser := range self.parsers {
- var captured map[string]interface{}
- remainingValues, captured = parser.Capture(remainingValues)
- for key, value := range captured {
- data[key] = value
- }
- }
+ for _, parser := range self.parsers {
+ var captured map[string]interface{}
+ remainingValues, captured = parser.Capture(remainingValues)
+ for key, value := range captured {
+ data[key] = value
+ }
+ }
- return remainingValues, data
+ return remainingValues, data
}
func (self CompleteParser) String() string {
- return fmt.Sprintf("CompleteParser %v", self.parsers)
+ return fmt.Sprintf("CompleteParser %v", self.parsers)
}
func flagKeyValueMatch(key string, values []string, index int) ([]string, string, bool) {
- if index > len(values) - 2 {
- return values, "", false
- }
+ if index > len(values)-2 {
+ return values, "", false
+ }
- if values[index] == key {
- value := values[index + 1]
- remaining := append(copySlice(values[:index]), values[index + 2:]...)
- return remaining, value, true
- }
+ if values[index] == key {
+ value := values[index+1]
+ remaining := append(copySlice(values[:index]), values[index+2:]...)
+ return remaining, value, true
+ }
- return flagKeyValueMatch(key, values, index + 1)
+ return flagKeyValueMatch(key, values, index+1)
}
func flagKeyMatch(key string, values []string, index int) ([]string, bool) {
- if index > len(values) - 1 {
- return values, false
- }
+ if index > len(values)-1 {
+ return values, false
+ }
- if values[index] == key {
- remaining := append(copySlice(values[:index]), values[index + 1:]...)
- return remaining, true
- }
+ if values[index] == key {
+ remaining := append(copySlice(values[:index]), values[index+1:]...)
+ return remaining, true
+ }
- return flagKeyMatch(key, values, index + 1)
+ return flagKeyMatch(key, values, index+1)
}
func copySlice(a []string) []string {
- b := make([]string, len(a))
- copy(b, a)
- return b
+ b := make([]string, len(a))
+ copy(b, a)
+ return b
}
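
The parsers above compose freely; CompleteParser succeeds only when every sub-parser matches and no arguments are left over. A minimal sketch, written as if inside package cli (for example in a test file) since the parser struct fields are unexported; the "copy" pattern is a made-up example:

    package cli

    import "fmt"

    func ExampleCompleteParser() {
    	p := CompleteParser{[]Parser{
    		EqualParser{"copy"},
    		CaptureGroupParser{"<src>"},
    		CaptureGroupParser{"<dst>"},
    	}}

    	// Match reports whether all arguments were consumed;
    	// Capture returns the values bound to the capture groups.
    	_, ok := p.Match([]string{"copy", "a.txt", "b.txt"})
    	_, data := p.Capture([]string{"copy", "a.txt", "b.txt"})
    	fmt.Println(ok, data["src"], data["dst"])
    	// Output: true a.txt b.txt
    }
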
diff --git a/compare.go b/compare.go
index 10cab3c..7dd8c86 100644
--- a/compare.go
+++ b/compare.go
@@ -1,74 +1,74 @@
package main
import (
- "os"
- "encoding/json"
"./drive"
+ "encoding/json"
+ "os"
)
const MinCacheFileSize = 5 * 1024 * 1024
-type Md5Comparer struct {}
+type Md5Comparer struct{}
func (self Md5Comparer) Changed(local *drive.LocalFile, remote *drive.RemoteFile) bool {
- return remote.Md5() != md5sum(local.AbsPath())
+ return remote.Md5() != md5sum(local.AbsPath())
}
type CachedFileInfo struct {
- Size int64 `json:"size"`
- Modified int64 `json:"modified"`
- Md5 string `json:"md5"`
+ Size int64 `json:"size"`
+ Modified int64 `json:"modified"`
+ Md5 string `json:"md5"`
}
func NewCachedMd5Comparer(path string) CachedMd5Comparer {
- cache := map[string]*CachedFileInfo{}
-
- f, err := os.Open(path)
- if err == nil {
- json.NewDecoder(f).Decode(&cache)
- }
- f.Close()
- return CachedMd5Comparer{path, cache}
+ cache := map[string]*CachedFileInfo{}
+
+ f, err := os.Open(path)
+ if err == nil {
+ json.NewDecoder(f).Decode(&cache)
+ }
+ f.Close()
+ return CachedMd5Comparer{path, cache}
}
type CachedMd5Comparer struct {
- path string
- cache map[string]*CachedFileInfo
+ path string
+ cache map[string]*CachedFileInfo
}
func (self CachedMd5Comparer) Changed(local *drive.LocalFile, remote *drive.RemoteFile) bool {
- return remote.Md5() != self.md5(local)
+ return remote.Md5() != self.md5(local)
}
func (self CachedMd5Comparer) md5(local *drive.LocalFile) string {
- // See if file exist in cache
- cached, found := self.cache[local.AbsPath()]
+ // See if file exist in cache
+ cached, found := self.cache[local.AbsPath()]
- // If found and modification time and size has not changed, return cached md5
- if found && local.Modified().UnixNano() == cached.Modified && local.Size() == cached.Size {
- return cached.Md5
- }
+ // If found and modification time and size has not changed, return cached md5
+ if found && local.Modified().UnixNano() == cached.Modified && local.Size() == cached.Size {
+ return cached.Md5
+ }
- // Calculate new md5 sum
- md5 := md5sum(local.AbsPath())
+ // Calculate new md5 sum
+ md5 := md5sum(local.AbsPath())
- // Cache file info if file meets size criteria
- if local.Size() > MinCacheFileSize {
- self.cacheAdd(local, md5)
- self.persist()
- }
+ // Cache file info if file meets size criteria
+ if local.Size() > MinCacheFileSize {
+ self.cacheAdd(local, md5)
+ self.persist()
+ }
- return md5
+ return md5
}
func (self CachedMd5Comparer) cacheAdd(lf *drive.LocalFile, md5 string) {
- self.cache[lf.AbsPath()] = &CachedFileInfo{
- Size: lf.Size(),
- Modified: lf.Modified().UnixNano(),
- Md5: md5,
- }
+ self.cache[lf.AbsPath()] = &CachedFileInfo{
+ Size: lf.Size(),
+ Modified: lf.Modified().UnixNano(),
+ Md5: md5,
+ }
}
func (self CachedMd5Comparer) persist() {
- writeJson(self.path, self.cache)
+ writeJson(self.path, self.cache)
}
diff --git a/drive/about.go b/drive/about.go
index 4c23ab8..c2f1643 100644
--- a/drive/about.go
+++ b/drive/about.go
@@ -1,68 +1,68 @@
package drive
import (
- "io"
- "fmt"
- "text/tabwriter"
+ "fmt"
+ "io"
+ "text/tabwriter"
)
type AboutArgs struct {
- Out io.Writer
- SizeInBytes bool
+ Out io.Writer
+ SizeInBytes bool
}
func (self *Drive) About(args AboutArgs) (err error) {
- about, err := self.service.About.Get().Fields("maxImportSizes", "maxUploadSize", "storageQuota", "user").Do()
- if err != nil {
- return fmt.Errorf("Failed to get about: %s", err)
- }
+ about, err := self.service.About.Get().Fields("maxImportSizes", "maxUploadSize", "storageQuota", "user").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get about: %s", err)
+ }
- user := about.User
- quota := about.StorageQuota
+ user := about.User
+ quota := about.StorageQuota
- fmt.Fprintf(args.Out, "User: %s, %s\n", user.DisplayName, user.EmailAddress)
- fmt.Fprintf(args.Out, "Used: %s\n", formatSize(quota.Usage, args.SizeInBytes))
- fmt.Fprintf(args.Out, "Free: %s\n", formatSize(quota.Limit - quota.Usage, args.SizeInBytes))
- fmt.Fprintf(args.Out, "Total: %s\n", formatSize(quota.Limit, args.SizeInBytes))
- fmt.Fprintf(args.Out, "Max upload size: %s\n", formatSize(about.MaxUploadSize, args.SizeInBytes))
- return
+ fmt.Fprintf(args.Out, "User: %s, %s\n", user.DisplayName, user.EmailAddress)
+ fmt.Fprintf(args.Out, "Used: %s\n", formatSize(quota.Usage, args.SizeInBytes))
+ fmt.Fprintf(args.Out, "Free: %s\n", formatSize(quota.Limit-quota.Usage, args.SizeInBytes))
+ fmt.Fprintf(args.Out, "Total: %s\n", formatSize(quota.Limit, args.SizeInBytes))
+ fmt.Fprintf(args.Out, "Max upload size: %s\n", formatSize(about.MaxUploadSize, args.SizeInBytes))
+ return
}
type AboutImportArgs struct {
- Out io.Writer
+ Out io.Writer
}
func (self *Drive) AboutImport(args AboutImportArgs) (err error) {
- about, err := self.service.About.Get().Fields("importFormats").Do()
- if err != nil {
- return fmt.Errorf("Failed to get about: %s", err)
- }
- printAboutFormats(args.Out, about.ImportFormats)
- return
+ about, err := self.service.About.Get().Fields("importFormats").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get about: %s", err)
+ }
+ printAboutFormats(args.Out, about.ImportFormats)
+ return
}
type AboutExportArgs struct {
- Out io.Writer
+ Out io.Writer
}
func (self *Drive) AboutExport(args AboutExportArgs) (err error) {
- about, err := self.service.About.Get().Fields("exportFormats").Do()
- if err != nil {
- return fmt.Errorf("Failed to get about: %s", err)
- }
- printAboutFormats(args.Out, about.ExportFormats)
- return
+ about, err := self.service.About.Get().Fields("exportFormats").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get about: %s", err)
+ }
+ printAboutFormats(args.Out, about.ExportFormats)
+ return
}
func printAboutFormats(out io.Writer, formats map[string][]string) {
- w := new(tabwriter.Writer)
- w.Init(out, 0, 0, 3, ' ', 0)
+ w := new(tabwriter.Writer)
+ w.Init(out, 0, 0, 3, ' ', 0)
- fmt.Fprintln(w, "From\tTo")
+ fmt.Fprintln(w, "From\tTo")
- for from, toFormats := range formats {
- fmt.Fprintf(w, "%s\t%s\n", from, formatList(toFormats))
- }
+ for from, toFormats := range formats {
+ fmt.Fprintf(w, "%s\t%s\n", from, formatList(toFormats))
+ }
- w.Flush()
+ w.Flush()
}
diff --git a/drive/changes.go b/drive/changes.go
index 1d9a89d..ffd7824 100644
--- a/drive/changes.go
+++ b/drive/changes.go
@@ -1,103 +1,103 @@
package drive
import (
- "fmt"
- "io"
- "text/tabwriter"
- "google.golang.org/api/drive/v3"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "io"
+ "text/tabwriter"
)
type ListChangesArgs struct {
- Out io.Writer
- PageToken string
- MaxChanges int64
- Now bool
- NameWidth int64
- SkipHeader bool
+ Out io.Writer
+ PageToken string
+ MaxChanges int64
+ Now bool
+ NameWidth int64
+ SkipHeader bool
}
func (self *Drive) ListChanges(args ListChangesArgs) error {
- if args.Now {
- pageToken, err := self.GetChangesStartPageToken()
- if err != nil {
- return err
- }
-
- fmt.Fprintf(args.Out, "Page token: %s\n", pageToken)
- return nil
- }
-
- changeList, err := self.service.Changes.List(args.PageToken).PageSize(args.MaxChanges).RestrictToMyDrive(true).Fields("newStartPageToken", "nextPageToken", "changes(fileId,removed,time,file(id,name,md5Checksum,mimeType,createdTime,modifiedTime))").Do()
- if err != nil {
- return fmt.Errorf("Failed listing changes: %s", err)
- }
-
- PrintChanges(PrintChangesArgs{
- Out: args.Out,
- ChangeList: changeList,
- NameWidth: int(args.NameWidth),
- SkipHeader: args.SkipHeader,
- })
-
- return nil
+ if args.Now {
+ pageToken, err := self.GetChangesStartPageToken()
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(args.Out, "Page token: %s\n", pageToken)
+ return nil
+ }
+
+ changeList, err := self.service.Changes.List(args.PageToken).PageSize(args.MaxChanges).RestrictToMyDrive(true).Fields("newStartPageToken", "nextPageToken", "changes(fileId,removed,time,file(id,name,md5Checksum,mimeType,createdTime,modifiedTime))").Do()
+ if err != nil {
+ return fmt.Errorf("Failed listing changes: %s", err)
+ }
+
+ PrintChanges(PrintChangesArgs{
+ Out: args.Out,
+ ChangeList: changeList,
+ NameWidth: int(args.NameWidth),
+ SkipHeader: args.SkipHeader,
+ })
+
+ return nil
}
func (self *Drive) GetChangesStartPageToken() (string, error) {
- res, err := self.service.Changes.GetStartPageToken().Do()
- if err != nil {
- return "", fmt.Errorf("Failed getting start page token: %s", err)
- }
+ res, err := self.service.Changes.GetStartPageToken().Do()
+ if err != nil {
+ return "", fmt.Errorf("Failed getting start page token: %s", err)
+ }
- return res.StartPageToken, nil
+ return res.StartPageToken, nil
}
type PrintChangesArgs struct {
- Out io.Writer
- ChangeList *drive.ChangeList
- NameWidth int
- SkipHeader bool
+ Out io.Writer
+ ChangeList *drive.ChangeList
+ NameWidth int
+ SkipHeader bool
}
func PrintChanges(args PrintChangesArgs) {
- w := new(tabwriter.Writer)
- w.Init(args.Out, 0, 0, 3, ' ', 0)
-
- if !args.SkipHeader {
- fmt.Fprintln(w, "Id\tName\tAction\tTime")
- }
-
- for _, c := range args.ChangeList.Changes {
- var name string
- var action string
-
- if c.Removed {
- action = "remove"
- } else {
- name = c.File.Name
- action = "update"
- }
-
- fmt.Fprintf(w, "%s\t%s\t%s\t%s\n",
- c.FileId,
- truncateString(name, args.NameWidth),
- action,
- formatDatetime(c.Time),
- )
- }
-
- if len(args.ChangeList.Changes) > 0 {
- w.Flush()
- pageToken, hasMore := nextChangesPageToken(args.ChangeList)
- fmt.Fprintf(args.Out, "\nToken: %s, more: %t\n", pageToken, hasMore)
- } else {
- fmt.Fprintln(args.Out, "No changes")
- }
+ w := new(tabwriter.Writer)
+ w.Init(args.Out, 0, 0, 3, ' ', 0)
+
+ if !args.SkipHeader {
+ fmt.Fprintln(w, "Id\tName\tAction\tTime")
+ }
+
+ for _, c := range args.ChangeList.Changes {
+ var name string
+ var action string
+
+ if c.Removed {
+ action = "remove"
+ } else {
+ name = c.File.Name
+ action = "update"
+ }
+
+ fmt.Fprintf(w, "%s\t%s\t%s\t%s\n",
+ c.FileId,
+ truncateString(name, args.NameWidth),
+ action,
+ formatDatetime(c.Time),
+ )
+ }
+
+ if len(args.ChangeList.Changes) > 0 {
+ w.Flush()
+ pageToken, hasMore := nextChangesPageToken(args.ChangeList)
+ fmt.Fprintf(args.Out, "\nToken: %s, more: %t\n", pageToken, hasMore)
+ } else {
+ fmt.Fprintln(args.Out, "No changes")
+ }
}
func nextChangesPageToken(cl *drive.ChangeList) (string, bool) {
- if cl.NextPageToken != "" {
- return cl.NextPageToken, true
- }
+ if cl.NextPageToken != "" {
+ return cl.NextPageToken, true
+ }
- return cl.NewStartPageToken, false
+ return cl.NewStartPageToken, false
}
diff --git a/drive/delete.go b/drive/delete.go
index bacd4a3..314672c 100644
--- a/drive/delete.go
+++ b/drive/delete.go
@@ -1,39 +1,39 @@
package drive
import (
- "io"
- "fmt"
+ "fmt"
+ "io"
)
type DeleteArgs struct {
- Out io.Writer
- Id string
- Recursive bool
+ Out io.Writer
+ Id string
+ Recursive bool
}
func (self *Drive) Delete(args DeleteArgs) error {
- f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do()
- if err != nil {
- return fmt.Errorf("Failed to get file: %s", err)
- }
+ f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get file: %s", err)
+ }
- if isDir(f) && !args.Recursive {
- return fmt.Errorf("'%s' is a directory, use the 'recursive' flag to delete directories", f.Name)
- }
+ if isDir(f) && !args.Recursive {
+ return fmt.Errorf("'%s' is a directory, use the 'recursive' flag to delete directories", f.Name)
+ }
- err = self.service.Files.Delete(args.Id).Do()
- if err != nil {
- return fmt.Errorf("Failed to delete file: %s", err)
- }
+ err = self.service.Files.Delete(args.Id).Do()
+ if err != nil {
+ return fmt.Errorf("Failed to delete file: %s", err)
+ }
- fmt.Fprintf(args.Out, "Deleted '%s'\n", f.Name)
- return nil
+ fmt.Fprintf(args.Out, "Deleted '%s'\n", f.Name)
+ return nil
}
func (self *Drive) deleteFile(fileId string) error {
- err := self.service.Files.Delete(fileId).Do()
- if err != nil {
- return fmt.Errorf("Failed to delete file: %s", err)
- }
- return nil
+ err := self.service.Files.Delete(fileId).Do()
+ if err != nil {
+ return fmt.Errorf("Failed to delete file: %s", err)
+ }
+ return nil
}
diff --git a/drive/download.go b/drive/download.go
index 1779d57..15495df 100644
--- a/drive/download.go
+++ b/drive/download.go
@@ -1,245 +1,245 @@
package drive
import (
- "fmt"
- "io"
- "os"
- "time"
- "path/filepath"
- "google.golang.org/api/drive/v3"
- "google.golang.org/api/googleapi"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "google.golang.org/api/googleapi"
+ "io"
+ "os"
+ "path/filepath"
+ "time"
)
type DownloadArgs struct {
- Out io.Writer
- Progress io.Writer
- Id string
- Path string
- Force bool
- Recursive bool
- Delete bool
- Stdout bool
+ Out io.Writer
+ Progress io.Writer
+ Id string
+ Path string
+ Force bool
+ Recursive bool
+ Delete bool
+ Stdout bool
}
func (self *Drive) Download(args DownloadArgs) error {
- if args.Recursive {
- return self.downloadRecursive(args)
- }
-
- f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do()
- if err != nil {
- return fmt.Errorf("Failed to get file: %s", err)
- }
-
- if isDir(f) {
- return fmt.Errorf("'%s' is a directory, use --recursive to download directories", f.Name)
- }
-
- if !isBinary(f) {
- return fmt.Errorf("'%s' is a google document and must be exported, see the export command", f.Name)
- }
-
- bytes, rate, err := self.downloadBinary(f, args)
-
- if !args.Stdout {
- fmt.Fprintf(args.Out, "Downloaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(bytes, false))
- }
-
- if args.Delete {
- err = self.deleteFile(args.Id)
- if err != nil {
- return fmt.Errorf("Failed to delete file: %s", err)
- }
-
- if !args.Stdout {
- fmt.Fprintf(args.Out, "Removed %s\n", args.Id)
- }
- }
- return err
+ if args.Recursive {
+ return self.downloadRecursive(args)
+ }
+
+ f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get file: %s", err)
+ }
+
+ if isDir(f) {
+ return fmt.Errorf("'%s' is a directory, use --recursive to download directories", f.Name)
+ }
+
+ if !isBinary(f) {
+ return fmt.Errorf("'%s' is a google document and must be exported, see the export command", f.Name)
+ }
+
+ bytes, rate, err := self.downloadBinary(f, args)
+
+ if !args.Stdout {
+ fmt.Fprintf(args.Out, "Downloaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(bytes, false))
+ }
+
+ if args.Delete {
+ err = self.deleteFile(args.Id)
+ if err != nil {
+ return fmt.Errorf("Failed to delete file: %s", err)
+ }
+
+ if !args.Stdout {
+ fmt.Fprintf(args.Out, "Removed %s\n", args.Id)
+ }
+ }
+ return err
}
type DownloadQueryArgs struct {
- Out io.Writer
- Progress io.Writer
- Query string
- Path string
- Force bool
- Recursive bool
+ Out io.Writer
+ Progress io.Writer
+ Query string
+ Path string
+ Force bool
+ Recursive bool
}
func (self *Drive) DownloadQuery(args DownloadQueryArgs) error {
- listArgs := listAllFilesArgs{
- query: args.Query,
- fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,size,md5Checksum)"},
- }
- files, err := self.listAllFiles(listArgs)
- if err != nil {
- return fmt.Errorf("Failed to list files: %s", err)
- }
-
- downloadArgs := DownloadArgs{
- Out: args.Out,
- Progress: args.Progress,
- Path: args.Path,
- Force: args.Force,
- }
-
- for _, f := range files {
- if isDir(f) && args.Recursive {
- err = self.downloadDirectory(f, downloadArgs)
- } else if isBinary(f) {
- _, _, err = self.downloadBinary(f, downloadArgs)
- }
-
- if err != nil {
- return err
- }
- }
-
- return nil
+ listArgs := listAllFilesArgs{
+ query: args.Query,
+ fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,size,md5Checksum)"},
+ }
+ files, err := self.listAllFiles(listArgs)
+ if err != nil {
+ return fmt.Errorf("Failed to list files: %s", err)
+ }
+
+ downloadArgs := DownloadArgs{
+ Out: args.Out,
+ Progress: args.Progress,
+ Path: args.Path,
+ Force: args.Force,
+ }
+
+ for _, f := range files {
+ if isDir(f) && args.Recursive {
+ err = self.downloadDirectory(f, downloadArgs)
+ } else if isBinary(f) {
+ _, _, err = self.downloadBinary(f, downloadArgs)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
}
func (self *Drive) downloadRecursive(args DownloadArgs) error {
- f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do()
- if err != nil {
- return fmt.Errorf("Failed to get file: %s", err)
- }
-
- if isDir(f) {
- return self.downloadDirectory(f, args)
- } else if isBinary(f) {
- _, _, err = self.downloadBinary(f, args)
- return err
- }
-
- return nil
+ f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get file: %s", err)
+ }
+
+ if isDir(f) {
+ return self.downloadDirectory(f, args)
+ } else if isBinary(f) {
+ _, _, err = self.downloadBinary(f, args)
+ return err
+ }
+
+ return nil
}
func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) (int64, int64, error) {
- // Get timeout reader wrapper and context
- timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext()
-
- res, err := self.service.Files.Get(f.Id).Context(ctx).Download()
- if err != nil {
- return 0, 0, fmt.Errorf("Failed to download file: %s", err)
- }
-
- // Close body on function exit
- defer res.Body.Close()
-
- // Path to file
- fpath := filepath.Join(args.Path, f.Name)
-
- if !args.Stdout {
- fmt.Fprintf(args.Out, "Downloading %s -> %s\n", f.Name, fpath)
- }
-
- return self.saveFile(saveFileArgs{
- out: args.Out,
- body: timeoutReaderWrapper(res.Body),
- contentLength: res.ContentLength,
- fpath: fpath,
- force: args.Force,
- stdout: args.Stdout,
- progress: args.Progress,
- })
+ // Get timeout reader wrapper and context
+ timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext()
+
+ res, err := self.service.Files.Get(f.Id).Context(ctx).Download()
+ if err != nil {
+ return 0, 0, fmt.Errorf("Failed to download file: %s", err)
+ }
+
+ // Close body on function exit
+ defer res.Body.Close()
+
+ // Path to file
+ fpath := filepath.Join(args.Path, f.Name)
+
+ if !args.Stdout {
+ fmt.Fprintf(args.Out, "Downloading %s -> %s\n", f.Name, fpath)
+ }
+
+ return self.saveFile(saveFileArgs{
+ out: args.Out,
+ body: timeoutReaderWrapper(res.Body),
+ contentLength: res.ContentLength,
+ fpath: fpath,
+ force: args.Force,
+ stdout: args.Stdout,
+ progress: args.Progress,
+ })
}
type saveFileArgs struct {
- out io.Writer
- body io.Reader
- contentLength int64
- fpath string
- force bool
- stdout bool
- progress io.Writer
+ out io.Writer
+ body io.Reader
+ contentLength int64
+ fpath string
+ force bool
+ stdout bool
+ progress io.Writer
}
func (self *Drive) saveFile(args saveFileArgs) (int64, int64, error) {
- // Wrap response body in progress reader
- srcReader := getProgressReader(args.body, args.progress, args.contentLength)
-
- if args.stdout {
- // Write file content to stdout
- _, err := io.Copy(args.out, srcReader)
- return 0, 0, err
- }
-
- // Check if file exists
- if !args.force && fileExists(args.fpath) {
- return 0, 0, fmt.Errorf("File '%s' already exists, use --force to overwrite", args.fpath)
- }
-
- // Ensure any parent directories exists
- if err := mkdir(args.fpath); err != nil {
- return 0, 0, err
- }
-
- // Download to tmp file
- tmpPath := args.fpath + ".incomplete"
-
- // Create new file
- outFile, err := os.Create(tmpPath)
- if err != nil {
- return 0, 0, fmt.Errorf("Unable to create new file: %s", err)
- }
-
- started := time.Now()
-
- // Save file to disk
- bytes, err := io.Copy(outFile, srcReader)
- if err != nil {
- outFile.Close()
- os.Remove(tmpPath)
- return 0, 0, fmt.Errorf("Failed saving file: %s", err)
- }
-
- // Calculate average download rate
- rate := calcRate(bytes, started, time.Now())
-
- //if deleteSourceFile {
- // self.Delete(args.Id)
- //}
-
- // Close File
- outFile.Close()
-
- // Rename tmp file to proper filename
- return bytes, rate, os.Rename(tmpPath, args.fpath)
+ // Wrap response body in progress reader
+ srcReader := getProgressReader(args.body, args.progress, args.contentLength)
+
+ if args.stdout {
+ // Write file content to stdout
+ _, err := io.Copy(args.out, srcReader)
+ return 0, 0, err
+ }
+
+ // Check if file exists
+ if !args.force && fileExists(args.fpath) {
+ return 0, 0, fmt.Errorf("File '%s' already exists, use --force to overwrite", args.fpath)
+ }
+
+ // Ensure any parent directories exists
+ if err := mkdir(args.fpath); err != nil {
+ return 0, 0, err
+ }
+
+ // Download to tmp file
+ tmpPath := args.fpath + ".incomplete"
+
+ // Create new file
+ outFile, err := os.Create(tmpPath)
+ if err != nil {
+ return 0, 0, fmt.Errorf("Unable to create new file: %s", err)
+ }
+
+ started := time.Now()
+
+ // Save file to disk
+ bytes, err := io.Copy(outFile, srcReader)
+ if err != nil {
+ outFile.Close()
+ os.Remove(tmpPath)
+ return 0, 0, fmt.Errorf("Failed saving file: %s", err)
+ }
+
+ // Calculate average download rate
+ rate := calcRate(bytes, started, time.Now())
+
+ //if deleteSourceFile {
+ // self.Delete(args.Id)
+ //}
+
+ // Close File
+ outFile.Close()
+
+ // Rename tmp file to proper filename
+ return bytes, rate, os.Rename(tmpPath, args.fpath)
}
func (self *Drive) downloadDirectory(parent *drive.File, args DownloadArgs) error {
- listArgs := listAllFilesArgs{
- query: fmt.Sprintf("'%s' in parents", parent.Id),
- fields: []googleapi.Field{"nextPageToken", "files(id,name)"},
- }
- files, err := self.listAllFiles(listArgs)
- if err != nil {
- return fmt.Errorf("Failed listing files: %s", err)
- }
-
- newPath := filepath.Join(args.Path, parent.Name)
-
- for _, f := range files {
- // Copy args and update changed fields
- newArgs := args
- newArgs.Path = newPath
- newArgs.Id = f.Id
- newArgs.Stdout = false
-
- err = self.downloadRecursive(newArgs)
- if err != nil {
- return err
- }
- }
-
- return nil
+ listArgs := listAllFilesArgs{
+ query: fmt.Sprintf("'%s' in parents", parent.Id),
+ fields: []googleapi.Field{"nextPageToken", "files(id,name)"},
+ }
+ files, err := self.listAllFiles(listArgs)
+ if err != nil {
+ return fmt.Errorf("Failed listing files: %s", err)
+ }
+
+ newPath := filepath.Join(args.Path, parent.Name)
+
+ for _, f := range files {
+ // Copy args and update changed fields
+ newArgs := args
+ newArgs.Path = newPath
+ newArgs.Id = f.Id
+ newArgs.Stdout = false
+
+ err = self.downloadRecursive(newArgs)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
}
func isDir(f *drive.File) bool {
- return f.MimeType == DirectoryMimeType
+ return f.MimeType == DirectoryMimeType
}
func isBinary(f *drive.File) bool {
- return f.Md5Checksum != ""
+ return f.Md5Checksum != ""
}
diff --git a/drive/drive.go b/drive/drive.go
index d908beb..696f5d5 100644
--- a/drive/drive.go
+++ b/drive/drive.go
@@ -1,19 +1,19 @@
package drive
import (
- "net/http"
- "google.golang.org/api/drive/v3"
+ "google.golang.org/api/drive/v3"
+ "net/http"
)
type Drive struct {
- service *drive.Service
+ service *drive.Service
}
func New(client *http.Client) (*Drive, error) {
- service, err := drive.New(client)
- if err != nil {
- return nil, err
- }
+ service, err := drive.New(client)
+ if err != nil {
+ return nil, err
+ }
- return &Drive{service}, nil
+ return &Drive{service}, nil
}
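
Drive is a thin wrapper around the generated drive/v3 service; any *http.Client produced by the auth package can be handed to New. A minimal sketch (hypothetical credentials and refresh token) tying the two packages together:

    package main

    import (
    	"fmt"
    	"os"

    	"./auth"
    	"./drive"
    )

    func main() {
    	// Build an authenticated client from an existing refresh token
    	// (hypothetical values, replace with real OAuth credentials).
    	client := auth.NewRefreshTokenClient("client-id", "client-secret", "refresh-token")

    	gdrive, err := drive.New(client)
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}

    	// Print account and quota information to stdout.
    	if err := gdrive.About(drive.AboutArgs{Out: os.Stdout}); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    }
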
diff --git a/drive/errors.go b/drive/errors.go
index 703dae5..e7631f7 100644
--- a/drive/errors.go
+++ b/drive/errors.go
@@ -1,22 +1,22 @@
package drive
import (
- "google.golang.org/api/googleapi"
- "time"
+ "google.golang.org/api/googleapi"
+ "time"
)
const MaxBackendErrorRetries = 5
func isBackendError(err error) bool {
- if err == nil {
- return false
- }
+ if err == nil {
+ return false
+ }
- ae, ok := err.(*googleapi.Error)
- return ok && ae.Code >= 500 && ae.Code <= 599
+ ae, ok := err.(*googleapi.Error)
+ return ok && ae.Code >= 500 && ae.Code <= 599
}
func exponentialBackoffSleep(try int) {
- seconds := pow(2, try)
- time.Sleep(time.Duration(seconds) * time.Second)
+ seconds := pow(2, try)
+ time.Sleep(time.Duration(seconds) * time.Second)
}
diff --git a/drive/export.go b/drive/export.go
index c90bc10..3fdd45a 100644
--- a/drive/export.go
+++ b/drive/export.go
@@ -1,111 +1,111 @@
package drive
import (
- "io"
- "os"
- "fmt"
- "mime"
+ "fmt"
+ "io"
+ "mime"
+ "os"
)
var DefaultExportMime = map[string]string{
- "application/vnd.google-apps.form": "application/zip",
- "application/vnd.google-apps.document": "application/pdf",
- "application/vnd.google-apps.drawing": "image/svg+xml",
- "application/vnd.google-apps.spreadsheet": "text/csv",
- "application/vnd.google-apps.script": "application/vnd.google-apps.script+json",
- "application/vnd.google-apps.presentation": "application/pdf",
+ "application/vnd.google-apps.form": "application/zip",
+ "application/vnd.google-apps.document": "application/pdf",
+ "application/vnd.google-apps.drawing": "image/svg+xml",
+ "application/vnd.google-apps.spreadsheet": "text/csv",
+ "application/vnd.google-apps.script": "application/vnd.google-apps.script+json",
+ "application/vnd.google-apps.presentation": "application/pdf",
}
type ExportArgs struct {
- Out io.Writer
- Id string
- PrintMimes bool
- Mime string
- Force bool
+ Out io.Writer
+ Id string
+ PrintMimes bool
+ Mime string
+ Force bool
}
func (self *Drive) Export(args ExportArgs) error {
- f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do()
- if err != nil {
- return fmt.Errorf("Failed to get file: %s", err)
- }
-
- if args.PrintMimes {
- return self.printMimes(args.Out, f.MimeType)
- }
-
- exportMime, err := getExportMime(args.Mime, f.MimeType)
- if err != nil {
- return err
- }
-
- filename := getExportFilename(f.Name, exportMime)
-
- res, err := self.service.Files.Export(args.Id, exportMime).Download()
- if err != nil {
- return fmt.Errorf("Failed to download file: %s", err)
- }
-
- // Close body on function exit
- defer res.Body.Close()
-
- // Check if file exists
- if !args.Force && fileExists(filename) {
- return fmt.Errorf("File '%s' already exists, use --force to overwrite", filename)
- }
-
- // Create new file
- outFile, err := os.Create(filename)
- if err != nil {
- return fmt.Errorf("Unable to create new file '%s': %s", filename, err)
- }
-
- // Close file on function exit
- defer outFile.Close()
-
- // Save file to disk
- _, err = io.Copy(outFile, res.Body)
- if err != nil {
- return fmt.Errorf("Failed saving file: %s", err)
- }
-
- fmt.Fprintf(args.Out, "Exported '%s' with mime type: '%s'\n", filename, exportMime)
- return nil
+ f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get file: %s", err)
+ }
+
+ if args.PrintMimes {
+ return self.printMimes(args.Out, f.MimeType)
+ }
+
+ exportMime, err := getExportMime(args.Mime, f.MimeType)
+ if err != nil {
+ return err
+ }
+
+ filename := getExportFilename(f.Name, exportMime)
+
+ res, err := self.service.Files.Export(args.Id, exportMime).Download()
+ if err != nil {
+ return fmt.Errorf("Failed to download file: %s", err)
+ }
+
+ // Close body on function exit
+ defer res.Body.Close()
+
+ // Check if file exists
+ if !args.Force && fileExists(filename) {
+ return fmt.Errorf("File '%s' already exists, use --force to overwrite", filename)
+ }
+
+ // Create new file
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return fmt.Errorf("Unable to create new file '%s': %s", filename, err)
+ }
+
+ // Close file on function exit
+ defer outFile.Close()
+
+ // Save file to disk
+ _, err = io.Copy(outFile, res.Body)
+ if err != nil {
+ return fmt.Errorf("Failed saving file: %s", err)
+ }
+
+ fmt.Fprintf(args.Out, "Exported '%s' with mime type: '%s'\n", filename, exportMime)
+ return nil
}
func (self *Drive) printMimes(out io.Writer, mimeType string) error {
- about, err := self.service.About.Get().Fields("exportFormats").Do()
- if err != nil {
- return fmt.Errorf("Failed to get about: %s", err)
- }
-
- mimes, ok := about.ExportFormats[mimeType]
- if !ok {
- return fmt.Errorf("File with type '%s' cannot be exported", mimeType)
- }
-
- fmt.Fprintf(out, "Available mime types: %s\n", formatList(mimes))
- return nil
+ about, err := self.service.About.Get().Fields("exportFormats").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get about: %s", err)
+ }
+
+ mimes, ok := about.ExportFormats[mimeType]
+ if !ok {
+ return fmt.Errorf("File with type '%s' cannot be exported", mimeType)
+ }
+
+ fmt.Fprintf(out, "Available mime types: %s\n", formatList(mimes))
+ return nil
}
func getExportMime(userMime, fileMime string) (string, error) {
- if userMime != "" {
- return userMime, nil
- }
+ if userMime != "" {
+ return userMime, nil
+ }
- defaultMime, ok := DefaultExportMime[fileMime]
- if !ok {
- return "", fmt.Errorf("File with type '%s' does not have a default export mime type and probably cannot be exported", fileMime)
- }
+ defaultMime, ok := DefaultExportMime[fileMime]
+ if !ok {
+ return "", fmt.Errorf("File with type '%s' does not have a default export mime type and probably cannot be exported", fileMime)
+ }
- return defaultMime, nil
+ return defaultMime, nil
}
func getExportFilename(name, mimeType string) string {
- extensions, err := mime.ExtensionsByType(mimeType)
- if err != nil || len(extensions) == 0 {
- return name
- }
+ extensions, err := mime.ExtensionsByType(mimeType)
+ if err != nil || len(extensions) == 0 {
+ return name
+ }
- return name + extensions[0]
+ return name + extensions[0]
}
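A minimal usage sketch for the export path above (hypothetical helper; the file id is a placeholder and the import path is assumed):

package example

import (
	"os"

	"github.com/prasmussen/gdrive/drive"
)

// exportAsPdf shows how ExportArgs is typically filled in; leaving Mime empty
// would instead pick a default from DefaultExportMime.
func exportAsPdf(drv *drive.Drive, fileId string) error {
	return drv.Export(drive.ExportArgs{
		Out:   os.Stdout, // status output only; the exported data is written to a local file
		Id:    fileId,
		Mime:  "application/pdf",
		Force: false, // refuse to overwrite an existing local file
	})
}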
diff --git a/drive/import.go b/drive/import.go
index a3d8b3b..2ee5f1e 100644
--- a/drive/import.go
+++ b/drive/import.go
@@ -1,53 +1,53 @@
package drive
import (
- "io"
- "io/ioutil"
- "fmt"
- "strings"
- "mime"
- "path/filepath"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "path/filepath"
+ "strings"
)
type ImportArgs struct {
- Out io.Writer
- Progress io.Writer
- Path string
- Parents []string
+ Out io.Writer
+ Progress io.Writer
+ Path string
+ Parents []string
}
func (self *Drive) Import(args ImportArgs) error {
- fromMime := getMimeType(args.Path)
- if fromMime == "" {
- return fmt.Errorf("Could not determine mime type of file")
- }
-
- about, err := self.service.About.Get().Fields("importFormats").Do()
- if err != nil {
- return fmt.Errorf("Failed to get about: %s", err)
- }
-
- toMimes, ok := about.ImportFormats[fromMime]
- if !ok || len(toMimes) == 0 {
- return fmt.Errorf("Mime type '%s' is not supported for import", fromMime)
- }
-
- f, _, err := self.uploadFile(UploadArgs{
- Out: ioutil.Discard,
- Progress: args.Progress,
- Path: args.Path,
- Parents: args.Parents,
- Mime: toMimes[0],
- })
- if err != nil {
- return err
- }
-
- fmt.Fprintf(args.Out, "Imported %s with mime type: '%s'\n", f.Id, toMimes[0])
- return nil
+ fromMime := getMimeType(args.Path)
+ if fromMime == "" {
+ return fmt.Errorf("Could not determine mime type of file")
+ }
+
+ about, err := self.service.About.Get().Fields("importFormats").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get about: %s", err)
+ }
+
+ toMimes, ok := about.ImportFormats[fromMime]
+ if !ok || len(toMimes) == 0 {
+ return fmt.Errorf("Mime type '%s' is not supported for import", fromMime)
+ }
+
+ f, _, err := self.uploadFile(UploadArgs{
+ Out: ioutil.Discard,
+ Progress: args.Progress,
+ Path: args.Path,
+ Parents: args.Parents,
+ Mime: toMimes[0],
+ })
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(args.Out, "Imported %s with mime type: '%s'\n", f.Id, toMimes[0])
+ return nil
}
func getMimeType(path string) string {
- t := mime.TypeByExtension(filepath.Ext(path))
- return strings.Split(t, ";")[0]
+ t := mime.TypeByExtension(filepath.Ext(path))
+ return strings.Split(t, ";")[0]
}
diff --git a/drive/info.go b/drive/info.go
index aa190a8..c6f4471 100644
--- a/drive/info.go
+++ b/drive/info.go
@@ -1,68 +1,68 @@
package drive
import (
- "io"
- "fmt"
- "google.golang.org/api/drive/v3"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "io"
)
type FileInfoArgs struct {
- Out io.Writer
- Id string
- SizeInBytes bool
+ Out io.Writer
+ Id string
+ SizeInBytes bool
}
func (self *Drive) Info(args FileInfoArgs) error {
- f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description", "webContentLink", "webViewLink").Do()
- if err != nil {
- return fmt.Errorf("Failed to get file: %s", err)
- }
+ f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description", "webContentLink", "webViewLink").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get file: %s", err)
+ }
- pathfinder := self.newPathfinder()
- absPath, err := pathfinder.absPath(f)
- if err != nil {
- return err
- }
+ pathfinder := self.newPathfinder()
+ absPath, err := pathfinder.absPath(f)
+ if err != nil {
+ return err
+ }
- PrintFileInfo(PrintFileInfoArgs{
- Out: args.Out,
- File: f,
- Path: absPath,
- SizeInBytes: args.SizeInBytes,
- })
+ PrintFileInfo(PrintFileInfoArgs{
+ Out: args.Out,
+ File: f,
+ Path: absPath,
+ SizeInBytes: args.SizeInBytes,
+ })
- return nil
+ return nil
}
type PrintFileInfoArgs struct {
- Out io.Writer
- File *drive.File
- Path string
- SizeInBytes bool
+ Out io.Writer
+ File *drive.File
+ Path string
+ SizeInBytes bool
}
func PrintFileInfo(args PrintFileInfoArgs) {
- f := args.File
+ f := args.File
- items := []kv{
- kv{"Id", f.Id},
- kv{"Name", f.Name},
- kv{"Path", args.Path},
- kv{"Description", f.Description},
- kv{"Mime", f.MimeType},
- kv{"Size", formatSize(f.Size, args.SizeInBytes)},
- kv{"Created", formatDatetime(f.CreatedTime)},
- kv{"Modified", formatDatetime(f.ModifiedTime)},
- kv{"Md5sum", f.Md5Checksum},
- kv{"Shared", formatBool(f.Shared)},
- kv{"Parents", formatList(f.Parents)},
- kv{"ViewUrl", f.WebViewLink},
- kv{"DownloadUrl", f.WebContentLink},
- }
+ items := []kv{
+ kv{"Id", f.Id},
+ kv{"Name", f.Name},
+ kv{"Path", args.Path},
+ kv{"Description", f.Description},
+ kv{"Mime", f.MimeType},
+ kv{"Size", formatSize(f.Size, args.SizeInBytes)},
+ kv{"Created", formatDatetime(f.CreatedTime)},
+ kv{"Modified", formatDatetime(f.ModifiedTime)},
+ kv{"Md5sum", f.Md5Checksum},
+ kv{"Shared", formatBool(f.Shared)},
+ kv{"Parents", formatList(f.Parents)},
+ kv{"ViewUrl", f.WebViewLink},
+ kv{"DownloadUrl", f.WebContentLink},
+ }
- for _, item := range items {
- if item.value != "" {
- fmt.Fprintf(args.Out, "%s: %s\n", item.key, item.value)
- }
- }
+ for _, item := range items {
+ if item.value != "" {
+ fmt.Fprintf(args.Out, "%s: %s\n", item.key, item.value)
+ }
+ }
}
diff --git a/drive/list.go b/drive/list.go
index 73fdea5..ab8aca5 100644
--- a/drive/list.go
+++ b/drive/list.go
@@ -1,136 +1,136 @@
package drive
import (
- "fmt"
- "io"
- "text/tabwriter"
- "golang.org/x/net/context"
- "google.golang.org/api/drive/v3"
- "google.golang.org/api/googleapi"
+ "fmt"
+ "golang.org/x/net/context"
+ "google.golang.org/api/drive/v3"
+ "google.golang.org/api/googleapi"
+ "io"
+ "text/tabwriter"
)
type ListFilesArgs struct {
- Out io.Writer
- MaxFiles int64
- NameWidth int64
- Query string
- SortOrder string
- SkipHeader bool
- SizeInBytes bool
- AbsPath bool
+ Out io.Writer
+ MaxFiles int64
+ NameWidth int64
+ Query string
+ SortOrder string
+ SkipHeader bool
+ SizeInBytes bool
+ AbsPath bool
}
func (self *Drive) List(args ListFilesArgs) (err error) {
- listArgs := listAllFilesArgs{
- query: args.Query,
- fields: []googleapi.Field{"nextPageToken", "files(id,name,md5Checksum,mimeType,size,createdTime,parents)"},
- sortOrder: args.SortOrder,
- maxFiles: args.MaxFiles,
- }
- files, err := self.listAllFiles(listArgs)
- if err != nil {
- return fmt.Errorf("Failed to list files: %s", err)
- }
-
- pathfinder := self.newPathfinder()
-
- if args.AbsPath {
- // Replace name with absolute path
- for _, f := range files {
- f.Name, err = pathfinder.absPath(f)
- if err != nil {
- return err
- }
- }
- }
-
- PrintFileList(PrintFileListArgs{
- Out: args.Out,
- Files: files,
- NameWidth: int(args.NameWidth),
- SkipHeader: args.SkipHeader,
- SizeInBytes: args.SizeInBytes,
- })
-
- return
+ listArgs := listAllFilesArgs{
+ query: args.Query,
+ fields: []googleapi.Field{"nextPageToken", "files(id,name,md5Checksum,mimeType,size,createdTime,parents)"},
+ sortOrder: args.SortOrder,
+ maxFiles: args.MaxFiles,
+ }
+ files, err := self.listAllFiles(listArgs)
+ if err != nil {
+ return fmt.Errorf("Failed to list files: %s", err)
+ }
+
+ pathfinder := self.newPathfinder()
+
+ if args.AbsPath {
+ // Replace name with absolute path
+ for _, f := range files {
+ f.Name, err = pathfinder.absPath(f)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ PrintFileList(PrintFileListArgs{
+ Out: args.Out,
+ Files: files,
+ NameWidth: int(args.NameWidth),
+ SkipHeader: args.SkipHeader,
+ SizeInBytes: args.SizeInBytes,
+ })
+
+ return
}
type listAllFilesArgs struct {
- query string
- fields []googleapi.Field
- sortOrder string
- maxFiles int64
+ query string
+ fields []googleapi.Field
+ sortOrder string
+ maxFiles int64
}
func (self *Drive) listAllFiles(args listAllFilesArgs) ([]*drive.File, error) {
- var files []*drive.File
+ var files []*drive.File
- var pageSize int64
- if args.maxFiles > 0 && args.maxFiles < 1000 {
- pageSize = args.maxFiles
- } else {
- pageSize = 1000
- }
+ var pageSize int64
+ if args.maxFiles > 0 && args.maxFiles < 1000 {
+ pageSize = args.maxFiles
+ } else {
+ pageSize = 1000
+ }
- controlledStop := fmt.Errorf("Controlled stop")
+ controlledStop := fmt.Errorf("Controlled stop")
- err := self.service.Files.List().Q(args.query).Fields(args.fields...).OrderBy(args.sortOrder).PageSize(pageSize).Pages(context.TODO(), func(fl *drive.FileList) error {
- files = append(files, fl.Files...)
+ err := self.service.Files.List().Q(args.query).Fields(args.fields...).OrderBy(args.sortOrder).PageSize(pageSize).Pages(context.TODO(), func(fl *drive.FileList) error {
+ files = append(files, fl.Files...)
- // Stop when we have all the files we need
- if args.maxFiles > 0 && len(files) >= int(args.maxFiles) {
- return controlledStop
- }
+ // Stop when we have all the files we need
+ if args.maxFiles > 0 && len(files) >= int(args.maxFiles) {
+ return controlledStop
+ }
- return nil
- })
+ return nil
+ })
- if err != nil && err != controlledStop {
- return nil, err
- }
+ if err != nil && err != controlledStop {
+ return nil, err
+ }
- if args.maxFiles > 0 {
- n := min(len(files), int(args.maxFiles))
- return files[:n], nil
- }
+ if args.maxFiles > 0 {
+ n := min(len(files), int(args.maxFiles))
+ return files[:n], nil
+ }
- return files, nil
+ return files, nil
}
type PrintFileListArgs struct {
- Out io.Writer
- Files []*drive.File
- NameWidth int
- SkipHeader bool
- SizeInBytes bool
+ Out io.Writer
+ Files []*drive.File
+ NameWidth int
+ SkipHeader bool
+ SizeInBytes bool
}
func PrintFileList(args PrintFileListArgs) {
- w := new(tabwriter.Writer)
- w.Init(args.Out, 0, 0, 3, ' ', 0)
-
- if !args.SkipHeader {
- fmt.Fprintln(w, "Id\tName\tType\tSize\tCreated")
- }
-
- for _, f := range args.Files {
- fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
- f.Id,
- truncateString(f.Name, args.NameWidth),
- filetype(f),
- formatSize(f.Size, args.SizeInBytes),
- formatDatetime(f.CreatedTime),
- )
- }
-
- w.Flush()
+ w := new(tabwriter.Writer)
+ w.Init(args.Out, 0, 0, 3, ' ', 0)
+
+ if !args.SkipHeader {
+ fmt.Fprintln(w, "Id\tName\tType\tSize\tCreated")
+ }
+
+ for _, f := range args.Files {
+ fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
+ f.Id,
+ truncateString(f.Name, args.NameWidth),
+ filetype(f),
+ formatSize(f.Size, args.SizeInBytes),
+ formatDatetime(f.CreatedTime),
+ )
+ }
+
+ w.Flush()
}
func filetype(f *drive.File) string {
- if isDir(f) {
- return "dir"
- } else if isBinary(f) {
- return "bin"
- }
- return "doc"
+ if isDir(f) {
+ return "dir"
+ } else if isBinary(f) {
+ return "bin"
+ }
+ return "doc"
}
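A usage sketch for the listing API above (hypothetical helper; the query string follows the Drive v3 search syntax and the import path is assumed):

package example

import (
	"os"

	"github.com/prasmussen/gdrive/drive"
)

// listFolders prints up to 30 folders, sorted by name, using the List API above.
func listFolders(drv *drive.Drive) error {
	return drv.List(drive.ListFilesArgs{
		Out:       os.Stdout,
		MaxFiles:  30,
		NameWidth: 40,
		Query:     "mimeType = 'application/vnd.google-apps.folder'",
		SortOrder: "name",
	})
}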
diff --git a/drive/mkdir.go b/drive/mkdir.go
index f6f0641..8eea210 100644
--- a/drive/mkdir.go
+++ b/drive/mkdir.go
@@ -1,39 +1,39 @@
package drive
import (
- "google.golang.org/api/drive/v3"
- "io"
- "fmt"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "io"
)
const DirectoryMimeType = "application/vnd.google-apps.folder"
type MkdirArgs struct {
- Out io.Writer
- Name string
- Parents []string
+ Out io.Writer
+ Name string
+ Parents []string
}
func (self *Drive) Mkdir(args MkdirArgs) error {
- f, err := self.mkdir(args)
- if err != nil {
- return err
- }
- fmt.Fprintf(args.Out, "Directory %s created\n", f.Id)
- return nil
+ f, err := self.mkdir(args)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(args.Out, "Directory %s created\n", f.Id)
+ return nil
}
func (self *Drive) mkdir(args MkdirArgs) (*drive.File, error) {
- dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType}
+ dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType}
- // Set parent folders
- dstFile.Parents = args.Parents
+ // Set parent folders
+ dstFile.Parents = args.Parents
- // Create directory
- f, err := self.service.Files.Create(dstFile).Do()
- if err != nil {
- return nil, fmt.Errorf("Failed to create directory: %s", err)
- }
+ // Create directory
+ f, err := self.service.Files.Create(dstFile).Do()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to create directory: %s", err)
+ }
- return f, nil
+ return f, nil
}
diff --git a/drive/path.go b/drive/path.go
index f5d1ad5..8043a01 100644
--- a/drive/path.go
+++ b/drive/path.go
@@ -1,65 +1,65 @@
package drive
import (
- "fmt"
- "path/filepath"
- "google.golang.org/api/drive/v3"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "path/filepath"
)
func (self *Drive) newPathfinder() *remotePathfinder {
- return &remotePathfinder{
- service: self.service.Files,
- files: make(map[string]*drive.File),
- }
+ return &remotePathfinder{
+ service: self.service.Files,
+ files: make(map[string]*drive.File),
+ }
}
type remotePathfinder struct {
- service *drive.FilesService
- files map[string]*drive.File
+ service *drive.FilesService
+ files map[string]*drive.File
}
func (self *remotePathfinder) absPath(f *drive.File) (string, error) {
- name := f.Name
+ name := f.Name
- if len(f.Parents) == 0 {
- return name, nil
- }
+ if len(f.Parents) == 0 {
+ return name, nil
+ }
- var path []string
+ var path []string
- for {
- parent, err := self.getParent(f.Parents[0])
- if err != nil {
- return "", err
- }
+ for {
+ parent, err := self.getParent(f.Parents[0])
+ if err != nil {
+ return "", err
+ }
- // Stop when we find the root dir
- if len(parent.Parents) == 0 {
- break
- }
+ // Stop when we find the root dir
+ if len(parent.Parents) == 0 {
+ break
+ }
- path = append([]string{parent.Name}, path...)
- f = parent
- }
+ path = append([]string{parent.Name}, path...)
+ f = parent
+ }
- path = append(path, name)
- return filepath.Join(path...), nil
+ path = append(path, name)
+ return filepath.Join(path...), nil
}
func (self *remotePathfinder) getParent(id string) (*drive.File, error) {
- // Check cache
- if f, ok := self.files[id]; ok {
- return f, nil
- }
+ // Check cache
+ if f, ok := self.files[id]; ok {
+ return f, nil
+ }
- // Fetch file from drive
- f, err := self.service.Get(id).Fields("id", "name", "parents").Do()
- if err != nil {
- return nil, fmt.Errorf("Failed to get file: %s", err)
- }
+ // Fetch file from drive
+ f, err := self.service.Get(id).Fields("id", "name", "parents").Do()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get file: %s", err)
+ }
- // Save in cache
- self.files[f.Id] = f
+ // Save in cache
+ self.files[f.Id] = f
- return f, nil
+ return f, nil
}
diff --git a/drive/progress.go b/drive/progress.go
index 989191e..bb5740c 100644
--- a/drive/progress.go
+++ b/drive/progress.go
@@ -1,101 +1,101 @@
package drive
import (
- "io"
- "io/ioutil"
- "fmt"
- "time"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "time"
)
const MaxDrawInterval = time.Second * 1
const MaxRateInterval = time.Second * 3
func getProgressReader(r io.Reader, w io.Writer, size int64) io.Reader {
- // Don't wrap reader if output is discarded or size is too small
- if w == ioutil.Discard || (size > 0 && size < 1024 * 1024) {
- return r
- }
-
- return &Progress{
- Reader: r,
- Writer: w,
- Size: size,
- }
+ // Don't wrap reader if output is discarded or size is too small
+ if w == ioutil.Discard || (size > 0 && size < 1024*1024) {
+ return r
+ }
+
+ return &Progress{
+ Reader: r,
+ Writer: w,
+ Size: size,
+ }
}
type Progress struct {
- Writer io.Writer
- Reader io.Reader
- Size int64
- progress int64
- rate int64
- rateProgress int64
- rateUpdated time.Time
- updated time.Time
- done bool
+ Writer io.Writer
+ Reader io.Reader
+ Size int64
+ progress int64
+ rate int64
+ rateProgress int64
+ rateUpdated time.Time
+ updated time.Time
+ done bool
}
func (self *Progress) Read(p []byte) (int, error) {
- // Read
- n, err := self.Reader.Read(p)
-
- now := time.Now()
- isLast := err != nil
-
- // Increment progress
- newProgress := self.progress + int64(n)
- self.progress = newProgress
-
- // Initialize rate state
- if self.rateUpdated.IsZero() {
- self.rateUpdated = now
- self.rateProgress = newProgress
- }
-
- // Update rate every x seconds
- if self.rateUpdated.Add(MaxRateInterval).Before(now) {
- self.rate = calcRate(newProgress - self.rateProgress, self.rateUpdated, now)
- self.rateUpdated = now
- self.rateProgress = newProgress
- }
-
- // Draw progress every x seconds
- if self.updated.Add(MaxDrawInterval).Before(now) || isLast {
- self.draw(isLast)
- self.updated = now
- }
-
- // Mark as done if error occurs
- self.done = isLast
-
- return n, err
+ // Read
+ n, err := self.Reader.Read(p)
+
+ now := time.Now()
+ isLast := err != nil
+
+ // Increment progress
+ newProgress := self.progress + int64(n)
+ self.progress = newProgress
+
+ // Initialize rate state
+ if self.rateUpdated.IsZero() {
+ self.rateUpdated = now
+ self.rateProgress = newProgress
+ }
+
+ // Update rate every x seconds
+ if self.rateUpdated.Add(MaxRateInterval).Before(now) {
+ self.rate = calcRate(newProgress-self.rateProgress, self.rateUpdated, now)
+ self.rateUpdated = now
+ self.rateProgress = newProgress
+ }
+
+ // Draw progress every x seconds
+ if self.updated.Add(MaxDrawInterval).Before(now) || isLast {
+ self.draw(isLast)
+ self.updated = now
+ }
+
+ // Mark as done if error occurs
+ self.done = isLast
+
+ return n, err
}
func (self *Progress) draw(isLast bool) {
- if self.done {
- return
- }
+ if self.done {
+ return
+ }
- self.clear()
+ self.clear()
- // Print progress
- fmt.Fprintf(self.Writer, "%s", formatSize(self.progress, false))
+ // Print progress
+ fmt.Fprintf(self.Writer, "%s", formatSize(self.progress, false))
- // Print total size
- if self.Size > 0 {
- fmt.Fprintf(self.Writer, "/%s", formatSize(self.Size, false))
- }
+ // Print total size
+ if self.Size > 0 {
+ fmt.Fprintf(self.Writer, "/%s", formatSize(self.Size, false))
+ }
- // Print rate
- if self.rate > 0 {
- fmt.Fprintf(self.Writer, ", Rate: %s/s", formatSize(self.rate, false))
- }
+ // Print rate
+ if self.rate > 0 {
+ fmt.Fprintf(self.Writer, ", Rate: %s/s", formatSize(self.rate, false))
+ }
- if isLast {
- self.clear()
- }
+ if isLast {
+ self.clear()
+ }
}
func (self *Progress) clear() {
- fmt.Fprintf(self.Writer, "\r%50s\r", "")
+ fmt.Fprintf(self.Writer, "\r%50s\r", "")
}
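Since Progress only wraps an io.Reader, it can be reused outside the download path; a small sketch (hypothetical helper, assuming the same import path):

package example

import (
	"io"
	"os"

	"github.com/prasmussen/gdrive/drive"
)

// copyWithProgress copies src to dst while drawing transfer progress to stderr.
func copyWithProgress(dst io.Writer, src io.Reader, size int64) (int64, error) {
	return io.Copy(dst, &drive.Progress{
		Reader: src,
		Writer: os.Stderr,
		Size:   size, // pass 0 if the total size is unknown
	})
}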
diff --git a/drive/revision_delete.go b/drive/revision_delete.go
index 88c81c6..de53041 100644
--- a/drive/revision_delete.go
+++ b/drive/revision_delete.go
@@ -1,31 +1,31 @@
package drive
import (
- "io"
- "fmt"
+ "fmt"
+ "io"
)
type DeleteRevisionArgs struct {
- Out io.Writer
- FileId string
- RevisionId string
+ Out io.Writer
+ FileId string
+ RevisionId string
}
func (self *Drive) DeleteRevision(args DeleteRevisionArgs) (err error) {
- rev, err := self.service.Revisions.Get(args.FileId, args.RevisionId).Fields("originalFilename").Do()
- if err != nil {
- return fmt.Errorf("Failed to get revision: %s", err)
- }
+ rev, err := self.service.Revisions.Get(args.FileId, args.RevisionId).Fields("originalFilename").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get revision: %s", err)
+ }
- if rev.OriginalFilename == "" {
- return fmt.Errorf("Deleting revisions for this file type is not supported")
- }
+ if rev.OriginalFilename == "" {
+ return fmt.Errorf("Deleting revisions for this file type is not supported")
+ }
- err = self.service.Revisions.Delete(args.FileId, args.RevisionId).Do()
- if err != nil {
- return fmt.Errorf("Failed to delete revision: %s", err)
- }
+ err = self.service.Revisions.Delete(args.FileId, args.RevisionId).Do()
+ if err != nil {
+ return fmt.Errorf("Failed to delete revision: %s", err)
+ }
- fmt.Fprintf(args.Out, "Deleted revision '%s'\n", args.RevisionId)
- return
+ fmt.Fprintf(args.Out, "Deleted revision '%s'\n", args.RevisionId)
+ return
}
diff --git a/drive/revision_download.go b/drive/revision_download.go
index 039cd19..04055fa 100644
--- a/drive/revision_download.go
+++ b/drive/revision_download.go
@@ -1,70 +1,70 @@
package drive
import (
- "fmt"
- "path/filepath"
- "io"
- "io/ioutil"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "path/filepath"
)
type DownloadRevisionArgs struct {
- Out io.Writer
- Progress io.Writer
- FileId string
- RevisionId string
- Path string
- Force bool
- Stdout bool
+ Out io.Writer
+ Progress io.Writer
+ FileId string
+ RevisionId string
+ Path string
+ Force bool
+ Stdout bool
}
func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) {
- getRev := self.service.Revisions.Get(args.FileId, args.RevisionId)
+ getRev := self.service.Revisions.Get(args.FileId, args.RevisionId)
- rev, err := getRev.Fields("originalFilename").Do()
- if err != nil {
- return fmt.Errorf("Failed to get file: %s", err)
- }
+ rev, err := getRev.Fields("originalFilename").Do()
+ if err != nil {
+ return fmt.Errorf("Failed to get file: %s", err)
+ }
- if rev.OriginalFilename == "" {
- return fmt.Errorf("Download is not supported for this file type")
- }
+ if rev.OriginalFilename == "" {
+ return fmt.Errorf("Download is not supported for this file type")
+ }
- // Get timeout reader wrapper and context
- timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext()
+ // Get timeout reader wrapper and context
+ timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext()
- res, err := getRev.Context(ctx).Download()
- if err != nil {
- return fmt.Errorf("Failed to download file: %s", err)
- }
+ res, err := getRev.Context(ctx).Download()
+ if err != nil {
+ return fmt.Errorf("Failed to download file: %s", err)
+ }
- // Close body on function exit
- defer res.Body.Close()
+ // Close body on function exit
+ defer res.Body.Close()
- // Discard other output if file is written to stdout
- out := args.Out
- if args.Stdout {
- out = ioutil.Discard
- }
+ // Discard other output if file is written to stdout
+ out := args.Out
+ if args.Stdout {
+ out = ioutil.Discard
+ }
- // Path to file
- fpath := filepath.Join(args.Path, rev.OriginalFilename)
+ // Path to file
+ fpath := filepath.Join(args.Path, rev.OriginalFilename)
- fmt.Fprintf(out, "Downloading %s -> %s\n", rev.OriginalFilename, fpath)
+ fmt.Fprintf(out, "Downloading %s -> %s\n", rev.OriginalFilename, fpath)
- bytes, rate, err := self.saveFile(saveFileArgs{
- out: args.Out,
- body: timeoutReaderWrapper(res.Body),
- contentLength: res.ContentLength,
- fpath: fpath,
- force: args.Force,
- stdout: args.Stdout,
- progress: args.Progress,
- })
+ bytes, rate, err := self.saveFile(saveFileArgs{
+ out: args.Out,
+ body: timeoutReaderWrapper(res.Body),
+ contentLength: res.ContentLength,
+ fpath: fpath,
+ force: args.Force,
+ stdout: args.Stdout,
+ progress: args.Progress,
+ })
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
+ }
- fmt.Fprintf(out, "Download complete, rate: %s/s, total size: %s\n", formatSize(rate, false), formatSize(bytes, false))
- return nil
+ fmt.Fprintf(out, "Download complete, rate: %s/s, total size: %s\n", formatSize(rate, false), formatSize(bytes, false))
+ return nil
}
diff --git a/drive/revision_list.go b/drive/revision_list.go
index 941fbca..eec4dab 100644
--- a/drive/revision_list.go
+++ b/drive/revision_list.go
@@ -1,62 +1,62 @@
package drive
import (
- "fmt"
- "io"
- "text/tabwriter"
- "google.golang.org/api/drive/v3"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "io"
+ "text/tabwriter"
)
type ListRevisionsArgs struct {
- Out io.Writer
- Id string
- NameWidth int64
- SkipHeader bool
- SizeInBytes bool
+ Out io.Writer
+ Id string
+ NameWidth int64
+ SkipHeader bool
+ SizeInBytes bool
}
func (self *Drive) ListRevisions(args ListRevisionsArgs) (err error) {
- revList, err := self.service.Revisions.List(args.Id).Fields("revisions(id,keepForever,size,modifiedTime,originalFilename)").Do()
- if err != nil {
- return fmt.Errorf("Failed listing revisions: %s", err)
- }
-
- PrintRevisionList(PrintRevisionListArgs{
- Out: args.Out,
- Revisions: revList.Revisions,
- NameWidth: int(args.NameWidth),
- SkipHeader: args.SkipHeader,
- SizeInBytes: args.SizeInBytes,
- })
-
- return
+ revList, err := self.service.Revisions.List(args.Id).Fields("revisions(id,keepForever,size,modifiedTime,originalFilename)").Do()
+ if err != nil {
+ return fmt.Errorf("Failed listing revisions: %s", err)
+ }
+
+ PrintRevisionList(PrintRevisionListArgs{
+ Out: args.Out,
+ Revisions: revList.Revisions,
+ NameWidth: int(args.NameWidth),
+ SkipHeader: args.SkipHeader,
+ SizeInBytes: args.SizeInBytes,
+ })
+
+ return
}
type PrintRevisionListArgs struct {
- Out io.Writer
- Revisions []*drive.Revision
- NameWidth int
- SkipHeader bool
- SizeInBytes bool
+ Out io.Writer
+ Revisions []*drive.Revision
+ NameWidth int
+ SkipHeader bool
+ SizeInBytes bool
}
func PrintRevisionList(args PrintRevisionListArgs) {
- w := new(tabwriter.Writer)
- w.Init(args.Out, 0, 0, 3, ' ', 0)
-
- if !args.SkipHeader {
- fmt.Fprintln(w, "Id\tName\tSize\tModified\tKeepForever")
- }
-
- for _, rev := range args.Revisions {
- fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
- rev.Id,
- truncateString(rev.OriginalFilename, args.NameWidth),
- formatSize(rev.Size, args.SizeInBytes),
- formatDatetime(rev.ModifiedTime),
- formatBool(rev.KeepForever),
- )
- }
-
- w.Flush()
+ w := new(tabwriter.Writer)
+ w.Init(args.Out, 0, 0, 3, ' ', 0)
+
+ if !args.SkipHeader {
+ fmt.Fprintln(w, "Id\tName\tSize\tModified\tKeepForever")
+ }
+
+ for _, rev := range args.Revisions {
+ fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
+ rev.Id,
+ truncateString(rev.OriginalFilename, args.NameWidth),
+ formatSize(rev.Size, args.SizeInBytes),
+ formatDatetime(rev.ModifiedTime),
+ formatBool(rev.KeepForever),
+ )
+ }
+
+ w.Flush()
}
diff --git a/drive/share.go b/drive/share.go
index 291512a..69b9c7d 100644
--- a/drive/share.go
+++ b/drive/share.go
@@ -1,109 +1,109 @@
package drive
import (
- "io"
- "fmt"
- "text/tabwriter"
- "google.golang.org/api/drive/v3"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "io"
+ "text/tabwriter"
)
type ShareArgs struct {
- Out io.Writer
- FileId string
- Role string
- Type string
- Email string
- Discoverable bool
+ Out io.Writer
+ FileId string
+ Role string
+ Type string
+ Email string
+ Discoverable bool
}
func (self *Drive) Share(args ShareArgs) error {
- permission := &drive.Permission{
- AllowFileDiscovery: args.Discoverable,
- Role: args.Role,
- Type: args.Type,
- EmailAddress: args.Email,
- }
-
- _, err := self.service.Permissions.Create(args.FileId, permission).Do()
- if err != nil {
- return fmt.Errorf("Failed to share file: %s", err)
- }
-
- fmt.Fprintf(args.Out, "Granted %s permission to %s\n", args.Role, args.Type)
- return nil
+ permission := &drive.Permission{
+ AllowFileDiscovery: args.Discoverable,
+ Role: args.Role,
+ Type: args.Type,
+ EmailAddress: args.Email,
+ }
+
+ _, err := self.service.Permissions.Create(args.FileId, permission).Do()
+ if err != nil {
+ return fmt.Errorf("Failed to share file: %s", err)
+ }
+
+ fmt.Fprintf(args.Out, "Granted %s permission to %s\n", args.Role, args.Type)
+ return nil
}
type RevokePermissionArgs struct {
- Out io.Writer
- FileId string
- PermissionId string
+ Out io.Writer
+ FileId string
+ PermissionId string
}
func (self *Drive) RevokePermission(args RevokePermissionArgs) error {
- err := self.service.Permissions.Delete(args.FileId, args.PermissionId).Do()
- if err != nil {
- err = fmt.Errorf("Failed to revoke permission: %s", err)
- return err
- }
-
- fmt.Fprintf(args.Out, "Permission revoked\n")
- return nil
+ err := self.service.Permissions.Delete(args.FileId, args.PermissionId).Do()
+ if err != nil {
+ err = fmt.Errorf("Failed to revoke permission: %s", err)
+ return err
+ }
+
+ fmt.Fprintf(args.Out, "Permission revoked\n")
+ return nil
}
type ListPermissionsArgs struct {
- Out io.Writer
- FileId string
+ Out io.Writer
+ FileId string
}
func (self *Drive) ListPermissions(args ListPermissionsArgs) error {
- permList, err := self.service.Permissions.List(args.FileId).Fields("permissions(id,role,type,domain,emailAddress,allowFileDiscovery)").Do()
- if err != nil {
- err = fmt.Errorf("Failed to list permissions: %s", err)
- return err
- }
-
- printPermissions(printPermissionsArgs{
- out: args.Out,
- permissions: permList.Permissions,
- })
- return nil
+ permList, err := self.service.Permissions.List(args.FileId).Fields("permissions(id,role,type,domain,emailAddress,allowFileDiscovery)").Do()
+ if err != nil {
- err = fmt.Errorf("Failed to list permissions: %s", err)
+ return err
+ }
+
+ printPermissions(printPermissionsArgs{
+ out: args.Out,
+ permissions: permList.Permissions,
+ })
+ return nil
}
func (self *Drive) shareAnyoneReader(fileId string) error {
- permission := &drive.Permission{
- Role: "reader",
- Type: "anyone",
- }
+ permission := &drive.Permission{
+ Role: "reader",
+ Type: "anyone",
+ }
- _, err := self.service.Permissions.Create(fileId, permission).Do()
- if err != nil {
- return fmt.Errorf("Failed to share file: %s", err)
- }
+ _, err := self.service.Permissions.Create(fileId, permission).Do()
+ if err != nil {
+ return fmt.Errorf("Failed to share file: %s", err)
+ }
- return nil
+ return nil
}
type printPermissionsArgs struct {
- out io.Writer
- permissions []*drive.Permission
+ out io.Writer
+ permissions []*drive.Permission
}
func printPermissions(args printPermissionsArgs) {
- w := new(tabwriter.Writer)
- w.Init(args.out, 0, 0, 3, ' ', 0)
-
- fmt.Fprintln(w, "Id\tType\tRole\tEmail\tDomain\tDiscoverable")
-
- for _, p := range args.permissions {
- fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n",
- p.Id,
- p.Type,
- p.Role,
- p.EmailAddress,
- p.Domain,
- formatBool(p.AllowFileDiscovery),
- )
- }
-
- w.Flush()
+ w := new(tabwriter.Writer)
+ w.Init(args.out, 0, 0, 3, ' ', 0)
+
+ fmt.Fprintln(w, "Id\tType\tRole\tEmail\tDomain\tDiscoverable")
+
+ for _, p := range args.permissions {
+ fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n",
+ p.Id,
+ p.Type,
+ p.Role,
+ p.EmailAddress,
+ p.Domain,
+ formatBool(p.AllowFileDiscovery),
+ )
+ }
+
+ w.Flush()
}
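A usage sketch for the sharing API above (hypothetical helper; the role and type values follow the Drive v3 permission model, as in shareAnyoneReader):

package example

import (
	"os"

	"github.com/prasmussen/gdrive/drive"
)

// shareWithUser grants read access on a file to a single user by email.
func shareWithUser(drv *drive.Drive, fileId, email string) error {
	return drv.Share(drive.ShareArgs{
		Out:    os.Stdout,
		FileId: fileId,
		Role:   "reader",
		Type:   "user",
		Email:  email,
	})
}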
diff --git a/drive/sync.go b/drive/sync.go
index 2124f8f..35ab16e 100644
--- a/drive/sync.go
+++ b/drive/sync.go
@@ -1,17 +1,17 @@
package drive
import (
- "time"
- "fmt"
- "os"
- "io"
- "strings"
- "path/filepath"
- "text/tabwriter"
- "github.com/soniakeys/graph"
- "github.com/sabhiram/go-git-ignore"
- "google.golang.org/api/drive/v3"
- "google.golang.org/api/googleapi"
+ "fmt"
+ "github.com/sabhiram/go-git-ignore"
+ "github.com/soniakeys/graph"
+ "google.golang.org/api/drive/v3"
+ "google.golang.org/api/googleapi"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "text/tabwriter"
+ "time"
)
const DefaultIgnoreFile = ".gdriveignore"
@@ -19,595 +19,607 @@ const DefaultIgnoreFile = ".gdriveignore"
type ModTime int
const (
- LocalLastModified ModTime = iota
- RemoteLastModified
- EqualModifiedTime
+ LocalLastModified ModTime = iota
+ RemoteLastModified
+ EqualModifiedTime
)
type LargestSize int
const (
- LocalLargestSize LargestSize = iota
- RemoteLargestSize
- EqualSize
+ LocalLargestSize LargestSize = iota
+ RemoteLargestSize
+ EqualSize
)
type ConflictResolution int
const (
- NoResolution ConflictResolution = iota
- KeepLocal
- KeepRemote
- KeepLargest
+ NoResolution ConflictResolution = iota
+ KeepLocal
+ KeepRemote
+ KeepLargest
)
func (self *Drive) prepareSyncFiles(localPath string, root *drive.File, cmp FileComparer) (*syncFiles, error) {
- localCh := make(chan struct{files []*LocalFile; err error})
- remoteCh := make(chan struct{files []*RemoteFile; err error})
-
- go func() {
- files, err := prepareLocalFiles(localPath)
- localCh <- struct{files []*LocalFile; err error}{files, err}
- }()
-
- go func() {
- files, err := self.prepareRemoteFiles(root, "")
- remoteCh <- struct{files []*RemoteFile; err error}{files, err}
- }()
-
- local := <-localCh
- if local.err != nil {
- return nil, local.err
- }
-
- remote := <-remoteCh
- if remote.err != nil {
- return nil, remote.err
- }
-
- return &syncFiles{
- root: &RemoteFile{file: root},
- local: local.files,
- remote: remote.files,
- compare: cmp,
- }, nil
+ localCh := make(chan struct {
+ files []*LocalFile
+ err error
+ })
+ remoteCh := make(chan struct {
+ files []*RemoteFile
+ err error
+ })
+
+ go func() {
+ files, err := prepareLocalFiles(localPath)
+ localCh <- struct {
+ files []*LocalFile
+ err error
+ }{files, err}
+ }()
+
+ go func() {
+ files, err := self.prepareRemoteFiles(root, "")
+ remoteCh <- struct {
+ files []*RemoteFile
+ err error
+ }{files, err}
+ }()
+
+ local := <-localCh
+ if local.err != nil {
+ return nil, local.err
+ }
+
+ remote := <-remoteCh
+ if remote.err != nil {
+ return nil, remote.err
+ }
+
+ return &syncFiles{
+ root: &RemoteFile{file: root},
+ local: local.files,
+ remote: remote.files,
+ compare: cmp,
+ }, nil
}
func (self *Drive) isSyncFile(id string) (bool, error) {
- f, err := self.service.Files.Get(id).Fields("appProperties").Do()
- if err != nil {
- return false, fmt.Errorf("Failed to get file: %s", err)
- }
+ f, err := self.service.Files.Get(id).Fields("appProperties").Do()
+ if err != nil {
+ return false, fmt.Errorf("Failed to get file: %s", err)
+ }
- _, ok := f.AppProperties["sync"]
- return ok, nil
+ _, ok := f.AppProperties["sync"]
+ return ok, nil
}
func prepareLocalFiles(root string) ([]*LocalFile, error) {
- var files []*LocalFile
-
- // Get absolute root path
- absRootPath, err := filepath.Abs(root)
- if err != nil {
- return nil, err
- }
-
- // Prepare ignorer
- shouldIgnore, err := prepareIgnorer(filepath.Join(absRootPath, DefaultIgnoreFile))
- if err != nil {
- return nil, err
- }
-
- err = filepath.Walk(absRootPath, func(absPath string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- // Skip root directory
- if absPath == absRootPath {
- return nil
- }
-
- // Skip files that are not a directory or regular file
- if !info.IsDir() && !info.Mode().IsRegular() {
- return nil
- }
-
- // Get relative path from root
- relPath, err := filepath.Rel(absRootPath, absPath)
- if err != nil {
- return err
- }
-
- // Skip file if it is ignored by ignore file
- if shouldIgnore(relPath) {
- return nil
- }
-
- files = append(files, &LocalFile{
- absPath: absPath,
- relPath: relPath,
- info: info,
- })
-
- return nil
- })
-
- if err != nil {
- return nil, fmt.Errorf("Failed to prepare local files: %s", err)
- }
-
- return files, err
+ var files []*LocalFile
+
+ // Get absolute root path
+ absRootPath, err := filepath.Abs(root)
+ if err != nil {
+ return nil, err
+ }
+
+ // Prepare ignorer
+ shouldIgnore, err := prepareIgnorer(filepath.Join(absRootPath, DefaultIgnoreFile))
+ if err != nil {
+ return nil, err
+ }
+
+ err = filepath.Walk(absRootPath, func(absPath string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Skip root directory
+ if absPath == absRootPath {
+ return nil
+ }
+
+ // Skip files that are not a directory or regular file
+ if !info.IsDir() && !info.Mode().IsRegular() {
+ return nil
+ }
+
+ // Get relative path from root
+ relPath, err := filepath.Rel(absRootPath, absPath)
+ if err != nil {
+ return err
+ }
+
+ // Skip file if it is ignored by ignore file
+ if shouldIgnore(relPath) {
+ return nil
+ }
+
+ files = append(files, &LocalFile{
+ absPath: absPath,
+ relPath: relPath,
+ info: info,
+ })
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("Failed to prepare local files: %s", err)
+ }
+
+ return files, err
}
func (self *Drive) prepareRemoteFiles(rootDir *drive.File, sortOrder string) ([]*RemoteFile, error) {
- // Find all files which have rootDir as their sync root
- listArgs := listAllFilesArgs{
- query: fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id),
- fields: []googleapi.Field{"nextPageToken", "files(id,name,parents,md5Checksum,mimeType,size,modifiedTime)"},
- sortOrder: sortOrder,
- }
- files, err := self.listAllFiles(listArgs)
- if err != nil {
- return nil, fmt.Errorf("Failed listing files: %s", err)
- }
-
- if err := checkFiles(files); err != nil {
- return nil, err
- }
-
- relPaths, err := prepareRemoteRelPaths(rootDir, files)
- if err != nil {
- return nil, err
- }
-
- var remoteFiles []*RemoteFile
- for _, f := range files {
- relPath, ok := relPaths[f.Id]
- if !ok {
- return nil, fmt.Errorf("File %s does not have a valid parent", f.Id)
- }
- remoteFiles = append(remoteFiles, &RemoteFile{
- relPath: relPath,
- file: f,
- })
- }
-
- return remoteFiles, nil
+ // Find all files which have rootDir as their sync root
+ listArgs := listAllFilesArgs{
+ query: fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id),
+ fields: []googleapi.Field{"nextPageToken", "files(id,name,parents,md5Checksum,mimeType,size,modifiedTime)"},
+ sortOrder: sortOrder,
+ }
+ files, err := self.listAllFiles(listArgs)
+ if err != nil {
+ return nil, fmt.Errorf("Failed listing files: %s", err)
+ }
+
+ if err := checkFiles(files); err != nil {
+ return nil, err
+ }
+
+ relPaths, err := prepareRemoteRelPaths(rootDir, files)
+ if err != nil {
+ return nil, err
+ }
+
+ var remoteFiles []*RemoteFile
+ for _, f := range files {
+ relPath, ok := relPaths[f.Id]
+ if !ok {
+ return nil, fmt.Errorf("File %s does not have a valid parent", f.Id)
+ }
+ remoteFiles = append(remoteFiles, &RemoteFile{
+ relPath: relPath,
+ file: f,
+ })
+ }
+
+ return remoteFiles, nil
}
func prepareRemoteRelPaths(root *drive.File, files []*drive.File) (map[string]string, error) {
- // The tree only holds integer values so we use
- // maps to lookup file by index and index by file id
- indexLookup := map[string]graph.NI{}
- fileLookup := map[graph.NI]*drive.File{}
-
- // All files includes root dir
- allFiles := append([]*drive.File{root}, files...)
-
- // Prepare lookup maps
- for i, f := range allFiles {
- indexLookup[f.Id] = graph.NI(i)
- fileLookup[graph.NI(i)] = f
- }
-
- // This will hold 'parent index' -> 'file index' relationships
- pathEnds := make([]graph.PathEnd, len(allFiles))
-
- // Prepare parent -> file relationships
- for i, f := range allFiles {
- if f == root {
- pathEnds[i] = graph.PathEnd{From: -1}
- continue
- }
-
- // Lookup index of parent
- parentIdx, found := indexLookup[f.Parents[0]]
- if !found {
- return nil, fmt.Errorf("Could not find parent of %s (%s)", f.Id, f.Name)
- }
- pathEnds[i] = graph.PathEnd{From: parentIdx}
- }
-
- // Create parent pointer tree and calculate path lengths
- tree := &graph.FromList{Paths: pathEnds}
- tree.RecalcLeaves()
- tree.RecalcLen()
-
- // This will hold a map of file id => relative path
- paths := map[string]string{}
-
- // Find relative path from root for all files
- for _, f := range allFiles {
- if f == root {
- continue
- }
-
- // Find nodes between root and file
- nodes := tree.PathTo(indexLookup[f.Id], nil)
-
- // This will hold the name of all paths between root and
- // file (excluding root and including the file itself)
- pathNames := []string{}
-
- // Lookup file for each node and grab name
- for _, n := range nodes {
- file := fileLookup[n]
- if file == root {
- continue
- }
- pathNames = append(pathNames, file.Name)
- }
-
- // Join path names to form relative path and add to map
- paths[f.Id] = filepath.Join(pathNames...)
- }
-
- return paths, nil
+ // The tree only holds integer values so we use
+ // maps to lookup file by index and index by file id
+ indexLookup := map[string]graph.NI{}
+ fileLookup := map[graph.NI]*drive.File{}
+
+ // All files includes root dir
+ allFiles := append([]*drive.File{root}, files...)
+
+ // Prepare lookup maps
+ for i, f := range allFiles {
+ indexLookup[f.Id] = graph.NI(i)
+ fileLookup[graph.NI(i)] = f
+ }
+
+ // This will hold 'parent index' -> 'file index' relationships
+ pathEnds := make([]graph.PathEnd, len(allFiles))
+
+ // Prepare parent -> file relationships
+ for i, f := range allFiles {
+ if f == root {
+ pathEnds[i] = graph.PathEnd{From: -1}
+ continue
+ }
+
+ // Lookup index of parent
+ parentIdx, found := indexLookup[f.Parents[0]]
+ if !found {
+ return nil, fmt.Errorf("Could not find parent of %s (%s)", f.Id, f.Name)
+ }
+ pathEnds[i] = graph.PathEnd{From: parentIdx}
+ }
+
+ // Create parent pointer tree and calculate path lengths
+ tree := &graph.FromList{Paths: pathEnds}
+ tree.RecalcLeaves()
+ tree.RecalcLen()
+
+ // This will hold a map of file id => relative path
+ paths := map[string]string{}
+
+ // Find relative path from root for all files
+ for _, f := range allFiles {
+ if f == root {
+ continue
+ }
+
+ // Find nodes between root and file
+ nodes := tree.PathTo(indexLookup[f.Id], nil)
+
+ // This will hold the name of all paths between root and
+ // file (excluding root and including the file itself)
+ pathNames := []string{}
+
+ // Lookup file for each node and grab name
+ for _, n := range nodes {
+ file := fileLookup[n]
+ if file == root {
+ continue
+ }
+ pathNames = append(pathNames, file.Name)
+ }
+
+ // Join path names to form relative path and add to map
+ paths[f.Id] = filepath.Join(pathNames...)
+ }
+
+ return paths, nil
}
func checkFiles(files []*drive.File) error {
- uniq := map[string]string{}
+ uniq := map[string]string{}
- for _, f := range files {
- // Ensure all files have exactly one parent
- if len(f.Parents) != 1 {
- return fmt.Errorf("File %s does not have exactly one parent", f.Id)
- }
+ for _, f := range files {
+ // Ensure all files have exactly one parent
+ if len(f.Parents) != 1 {
+ return fmt.Errorf("File %s does not have exactly one parent", f.Id)
+ }
- // Ensure that there are no duplicate files
- uniqKey := f.Name + f.Parents[0]
- if dupeId, isDupe := uniq[uniqKey]; isDupe {
- return fmt.Errorf("Found name collision between %s and %s", f.Id, dupeId)
- }
- uniq[uniqKey] = f.Id
- }
+ // Ensure that there are no duplicate files
+ uniqKey := f.Name + f.Parents[0]
+ if dupeId, isDupe := uniq[uniqKey]; isDupe {
+ return fmt.Errorf("Found name collision between %s and %s", f.Id, dupeId)
+ }
+ uniq[uniqKey] = f.Id
+ }
- return nil
+ return nil
}
type LocalFile struct {
- absPath string
- relPath string
- info os.FileInfo
+ absPath string
+ relPath string
+ info os.FileInfo
}
type RemoteFile struct {
- relPath string
- file *drive.File
+ relPath string
+ file *drive.File
}
type changedFile struct {
- local *LocalFile
- remote *RemoteFile
+ local *LocalFile
+ remote *RemoteFile
}
type syncFiles struct {
- root *RemoteFile
- local []*LocalFile
- remote []*RemoteFile
- compare FileComparer
+ root *RemoteFile
+ local []*LocalFile
+ remote []*RemoteFile
+ compare FileComparer
}
type FileComparer interface {
- Changed(*LocalFile, *RemoteFile) bool
+ Changed(*LocalFile, *RemoteFile) bool
}
func (self LocalFile) AbsPath() string {
- return self.absPath
+ return self.absPath
}
func (self LocalFile) Size() int64 {
- return self.info.Size()
+ return self.info.Size()
}
func (self LocalFile) Modified() time.Time {
- return self.info.ModTime()
+ return self.info.ModTime()
}
func (self RemoteFile) Md5() string {
- return self.file.Md5Checksum
+ return self.file.Md5Checksum
}
func (self RemoteFile) Size() int64 {
- return self.file.Size
+ return self.file.Size
}
func (self RemoteFile) Modified() time.Time {
- t, _ := time.Parse(time.RFC3339, self.file.ModifiedTime)
- return t
+ t, _ := time.Parse(time.RFC3339, self.file.ModifiedTime)
+ return t
}
func (self *changedFile) compareModTime() ModTime {
- localTime := self.local.Modified()
- remoteTime := self.remote.Modified()
+ localTime := self.local.Modified()
+ remoteTime := self.remote.Modified()
- if localTime.After(remoteTime) {
- return LocalLastModified
- }
+ if localTime.After(remoteTime) {
+ return LocalLastModified
+ }
- if remoteTime.After(localTime) {
- return RemoteLastModified
- }
+ if remoteTime.After(localTime) {
+ return RemoteLastModified
+ }
- return EqualModifiedTime
+ return EqualModifiedTime
}
func (self *changedFile) compareSize() LargestSize {
- localSize := self.local.Size()
- remoteSize := self.remote.Size()
+ localSize := self.local.Size()
+ remoteSize := self.remote.Size()
- if localSize > remoteSize {
- return LocalLargestSize
- }
+ if localSize > remoteSize {
+ return LocalLargestSize
+ }
- if remoteSize > localSize {
- return RemoteLargestSize
- }
+ if remoteSize > localSize {
+ return RemoteLargestSize
+ }
- return EqualSize
+ return EqualSize
}
func (self *syncFiles) filterMissingRemoteDirs() []*LocalFile {
- var files []*LocalFile
+ var files []*LocalFile
- for _, lf := range self.local {
- if lf.info.IsDir() && !self.existsRemote(lf) {
- files = append(files, lf)
- }
- }
+ for _, lf := range self.local {
+ if lf.info.IsDir() && !self.existsRemote(lf) {
+ files = append(files, lf)
+ }
+ }
- return files
+ return files
}
func (self *syncFiles) filterMissingLocalDirs() []*RemoteFile {
- var files []*RemoteFile
+ var files []*RemoteFile
- for _, rf := range self.remote {
- if isDir(rf.file) && !self.existsLocal(rf) {
- files = append(files, rf)
- }
- }
+ for _, rf := range self.remote {
+ if isDir(rf.file) && !self.existsLocal(rf) {
+ files = append(files, rf)
+ }
+ }
- return files
+ return files
}
func (self *syncFiles) filterMissingRemoteFiles() []*LocalFile {
- var files []*LocalFile
+ var files []*LocalFile
- for _, lf := range self.local {
- if !lf.info.IsDir() && !self.existsRemote(lf) {
- files = append(files, lf)
- }
- }
+ for _, lf := range self.local {
+ if !lf.info.IsDir() && !self.existsRemote(lf) {
+ files = append(files, lf)
+ }
+ }
- return files
+ return files
}
func (self *syncFiles) filterMissingLocalFiles() []*RemoteFile {
- var files []*RemoteFile
+ var files []*RemoteFile
- for _, rf := range self.remote {
- if !isDir(rf.file) && !self.existsLocal(rf) {
- files = append(files, rf)
- }
- }
+ for _, rf := range self.remote {
+ if !isDir(rf.file) && !self.existsLocal(rf) {
+ files = append(files, rf)
+ }
+ }
- return files
+ return files
}
func (self *syncFiles) filterChangedLocalFiles() []*changedFile {
- var files []*changedFile
+ var files []*changedFile
- for _, lf := range self.local {
- // Skip directories
- if lf.info.IsDir() {
- continue
- }
+ for _, lf := range self.local {
+ // Skip directories
+ if lf.info.IsDir() {
+ continue
+ }
- // Skip files that don't exist on drive
- rf, found := self.findRemoteByPath(lf.relPath)
- if !found {
- continue
- }
+ // Skip files that don't exist on drive
+ rf, found := self.findRemoteByPath(lf.relPath)
+ if !found {
+ continue
+ }
- // Check if file has changed
- if self.compare.Changed(lf, rf) {
- files = append(files, &changedFile{
- local: lf,
- remote: rf,
- })
- }
- }
+ // Check if file has changed
+ if self.compare.Changed(lf, rf) {
+ files = append(files, &changedFile{
+ local: lf,
+ remote: rf,
+ })
+ }
+ }
- return files
+ return files
}
func (self *syncFiles) filterChangedRemoteFiles() []*changedFile {
- var files []*changedFile
+ var files []*changedFile
- for _, rf := range self.remote {
- // Skip directories
- if isDir(rf.file) {
- continue
- }
+ for _, rf := range self.remote {
+ // Skip directories
+ if isDir(rf.file) {
+ continue
+ }
- // Skip local files that don't exist
- lf, found := self.findLocalByPath(rf.relPath)
- if !found {
- continue
- }
+ // Skip local files that don't exist
+ lf, found := self.findLocalByPath(rf.relPath)
+ if !found {
+ continue
+ }
- // Check if file has changed
- if self.compare.Changed(lf, rf) {
- files = append(files, &changedFile{
- local: lf,
- remote: rf,
- })
- }
- }
+ // Check if file has changed
+ if self.compare.Changed(lf, rf) {
+ files = append(files, &changedFile{
+ local: lf,
+ remote: rf,
+ })
+ }
+ }
- return files
+ return files
}
func (self *syncFiles) filterExtraneousRemoteFiles() []*RemoteFile {
- var files []*RemoteFile
+ var files []*RemoteFile
- for _, rf := range self.remote {
- if !self.existsLocal(rf) {
- files = append(files, rf)
- }
- }
+ for _, rf := range self.remote {
+ if !self.existsLocal(rf) {
+ files = append(files, rf)
+ }
+ }
- return files
+ return files
}
func (self *syncFiles) filterExtraneousLocalFiles() []*LocalFile {
- var files []*LocalFile
+ var files []*LocalFile
- for _, lf := range self.local {
- if !self.existsRemote(lf) {
- files = append(files, lf)
- }
- }
+ for _, lf := range self.local {
+ if !self.existsRemote(lf) {
+ files = append(files, lf)
+ }
+ }
- return files
+ return files
}
func (self *syncFiles) existsRemote(lf *LocalFile) bool {
- _, found := self.findRemoteByPath(lf.relPath)
- return found
+ _, found := self.findRemoteByPath(lf.relPath)
+ return found
}
func (self *syncFiles) existsLocal(rf *RemoteFile) bool {
- _, found := self.findLocalByPath(rf.relPath)
- return found
+ _, found := self.findLocalByPath(rf.relPath)
+ return found
}
func (self *syncFiles) findRemoteByPath(relPath string) (*RemoteFile, bool) {
- if relPath == "." {
- return self.root, true
- }
+ if relPath == "." {
+ return self.root, true
+ }
- for _, rf := range self.remote {
- if relPath == rf.relPath {
- return rf, true
- }
- }
+ for _, rf := range self.remote {
+ if relPath == rf.relPath {
+ return rf, true
+ }
+ }
- return nil, false
+ return nil, false
}
func (self *syncFiles) findLocalByPath(relPath string) (*LocalFile, bool) {
- for _, lf := range self.local {
- if relPath == lf.relPath {
- return lf, true
- }
- }
+ for _, lf := range self.local {
+ if relPath == lf.relPath {
+ return lf, true
+ }
+ }
- return nil, false
+ return nil, false
}
func findLocalConflicts(files []*changedFile) []*changedFile {
- var conflicts []*changedFile
+ var conflicts []*changedFile
- for _, cf := range files {
- if cf.compareModTime() == LocalLastModified {
- conflicts = append(conflicts, cf)
- }
- }
+ for _, cf := range files {
+ if cf.compareModTime() == LocalLastModified {
+ conflicts = append(conflicts, cf)
+ }
+ }
- return conflicts
+ return conflicts
}
func findRemoteConflicts(files []*changedFile) []*changedFile {
- var conflicts []*changedFile
+ var conflicts []*changedFile
- for _, cf := range files {
- if cf.compareModTime() == RemoteLastModified {
- conflicts = append(conflicts, cf)
- }
- }
+ for _, cf := range files {
+ if cf.compareModTime() == RemoteLastModified {
+ conflicts = append(conflicts, cf)
+ }
+ }
- return conflicts
+ return conflicts
}
type byLocalPathLength []*LocalFile
func (self byLocalPathLength) Len() int {
- return len(self)
+ return len(self)
}
func (self byLocalPathLength) Swap(i, j int) {
- self[i], self[j] = self[j], self[i]
+ self[i], self[j] = self[j], self[i]
}
func (self byLocalPathLength) Less(i, j int) bool {
- return pathLength(self[i].relPath) < pathLength(self[j].relPath)
+ return pathLength(self[i].relPath) < pathLength(self[j].relPath)
}
type byRemotePathLength []*RemoteFile
func (self byRemotePathLength) Len() int {
- return len(self)
+ return len(self)
}
func (self byRemotePathLength) Swap(i, j int) {
- self[i], self[j] = self[j], self[i]
+ self[i], self[j] = self[j], self[i]
}
func (self byRemotePathLength) Less(i, j int) bool {
- return pathLength(self[i].relPath) < pathLength(self[j].relPath)
+ return pathLength(self[i].relPath) < pathLength(self[j].relPath)
}
type byRemotePath []*RemoteFile
func (self byRemotePath) Len() int {
- return len(self)
+ return len(self)
}
func (self byRemotePath) Swap(i, j int) {
- self[i], self[j] = self[j], self[i]
+ self[i], self[j] = self[j], self[i]
}
func (self byRemotePath) Less(i, j int) bool {
- return strings.ToLower(self[i].relPath) < strings.ToLower(self[j].relPath)
+ return strings.ToLower(self[i].relPath) < strings.ToLower(self[j].relPath)
}
type ignoreFunc func(string) bool
func prepareIgnorer(path string) (ignoreFunc, error) {
- acceptAll := func(string) bool {
- return false
- }
+ acceptAll := func(string) bool {
+ return false
+ }
- if !fileExists(path) {
- return acceptAll, nil
- }
+ if !fileExists(path) {
+ return acceptAll, nil
+ }
- ignorer, err := ignore.CompileIgnoreFile(path)
- if err != nil {
- return acceptAll, fmt.Errorf("Failed to prepare ignorer: %s", err)
- }
+ ignorer, err := ignore.CompileIgnoreFile(path)
+ if err != nil {
+ return acceptAll, fmt.Errorf("Failed to prepare ignorer: %s", err)
+ }
- return ignorer.MatchesPath, nil
+ return ignorer.MatchesPath, nil
}
func formatConflicts(conflicts []*changedFile, out io.Writer) {
- w := new(tabwriter.Writer)
- w.Init(out, 0, 0, 3, ' ', 0)
-
- fmt.Fprintln(w, "Path\tSize Local\tSize Remote\tModified Local\tModified Remote")
-
- for _, cf := range conflicts {
- fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
- truncateString(cf.local.relPath, 60),
- formatSize(cf.local.Size(), false),
- formatSize(cf.remote.Size(), false),
- cf.local.Modified().Local().Format("Jan _2 2006 15:04:05.000"),
- cf.remote.Modified().Local().Format("Jan _2 2006 15:04:05.000"),
- )
- }
-
- w.Flush()
+ w := new(tabwriter.Writer)
+ w.Init(out, 0, 0, 3, ' ', 0)
+
+ fmt.Fprintln(w, "Path\tSize Local\tSize Remote\tModified Local\tModified Remote")
+
+ for _, cf := range conflicts {
+ fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
+ truncateString(cf.local.relPath, 60),
+ formatSize(cf.local.Size(), false),
+ formatSize(cf.remote.Size(), false),
+ cf.local.Modified().Local().Format("Jan _2 2006 15:04:05.000"),
+ cf.remote.Modified().Local().Format("Jan _2 2006 15:04:05.000"),
+ )
+ }
+
+ w.Flush()
}
diff --git a/drive/sync_download.go b/drive/sync_download.go
index 4d84eea..04b50b9 100644
--- a/drive/sync_download.go
+++ b/drive/sync_download.go
@@ -1,325 +1,325 @@
package drive
import (
- "fmt"
- "io"
- "os"
- "sort"
- "time"
- "bytes"
- "path/filepath"
- "google.golang.org/api/googleapi"
- "google.golang.org/api/drive/v3"
+ "bytes"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "google.golang.org/api/googleapi"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "time"
)
type DownloadSyncArgs struct {
- Out io.Writer
- Progress io.Writer
- RootId string
- Path string
- DryRun bool
- DeleteExtraneous bool
- Resolution ConflictResolution
- Comparer FileComparer
+ Out io.Writer
+ Progress io.Writer
+ RootId string
+ Path string
+ DryRun bool
+ DeleteExtraneous bool
+ Resolution ConflictResolution
+ Comparer FileComparer
}
func (self *Drive) DownloadSync(args DownloadSyncArgs) error {
- fmt.Fprintln(args.Out, "Starting sync...")
- started := time.Now()
-
- // Get remote root dir
- rootDir, err := self.getSyncRoot(args.RootId)
- if err != nil {
- return err
- }
-
- fmt.Fprintln(args.Out, "Collecting file information...")
- files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer)
- if err != nil {
- return err
- }
-
- // Find changed files
- changedFiles := files.filterChangedRemoteFiles()
-
- fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote))
-
- // Ensure that we don't overwrite any local changes
- if args.Resolution == NoResolution {
- err = ensureNoLocalModifications(changedFiles)
- if err != nil {
-			return fmt.Errorf("Conflict detected!\nThe following files have changed and the local files are newer than their remote counterparts:\n\n%s\nNo conflict resolution was given, aborting...", err)
- }
- }
-
- // Create missing directories
- err = self.createMissingLocalDirs(files, args)
- if err != nil {
- return err
- }
-
- // Download missing files
- err = self.downloadMissingFiles(files, args)
- if err != nil {
- return err
- }
-
-	// Download files that have changed
- err = self.downloadChangedFiles(changedFiles, args)
- if err != nil {
- return err
- }
-
- // Delete extraneous local files
- if args.DeleteExtraneous {
- err = self.deleteExtraneousLocalFiles(files, args)
- if err != nil {
- return err
- }
- }
- fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started))
-
- return nil
+ fmt.Fprintln(args.Out, "Starting sync...")
+ started := time.Now()
+
+ // Get remote root dir
+ rootDir, err := self.getSyncRoot(args.RootId)
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintln(args.Out, "Collecting file information...")
+ files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer)
+ if err != nil {
+ return err
+ }
+
+ // Find changed files
+ changedFiles := files.filterChangedRemoteFiles()
+
+ fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote))
+
+ // Ensure that we don't overwrite any local changes
+ if args.Resolution == NoResolution {
+ err = ensureNoLocalModifications(changedFiles)
+ if err != nil {
+			return fmt.Errorf("Conflict detected!\nThe following files have changed and the local files are newer than their remote counterparts:\n\n%s\nNo conflict resolution was given, aborting...", err)
+ }
+ }
+
+ // Create missing directories
+ err = self.createMissingLocalDirs(files, args)
+ if err != nil {
+ return err
+ }
+
+ // Download missing files
+ err = self.downloadMissingFiles(files, args)
+ if err != nil {
+ return err
+ }
+
+	// Download files that have changed
+ err = self.downloadChangedFiles(changedFiles, args)
+ if err != nil {
+ return err
+ }
+
+ // Delete extraneous local files
+ if args.DeleteExtraneous {
+ err = self.deleteExtraneousLocalFiles(files, args)
+ if err != nil {
+ return err
+ }
+ }
+ fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started))
+
+ return nil
}
func (self *Drive) getSyncRoot(rootId string) (*drive.File, error) {
- fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"}
- f, err := self.service.Files.Get(rootId).Fields(fields...).Do()
- if err != nil {
- return nil, fmt.Errorf("Failed to find root dir: %s", err)
- }
-
- // Ensure file is a directory
- if !isDir(f) {
- return nil, fmt.Errorf("Provided root id is not a directory")
- }
-
- // Ensure directory is a proper syncRoot
- if _, ok := f.AppProperties["syncRoot"]; !ok {
- return nil, fmt.Errorf("Provided id is not a sync root directory")
- }
-
- return f, nil
+ fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"}
+ f, err := self.service.Files.Get(rootId).Fields(fields...).Do()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to find root dir: %s", err)
+ }
+
+ // Ensure file is a directory
+ if !isDir(f) {
+ return nil, fmt.Errorf("Provided root id is not a directory")
+ }
+
+ // Ensure directory is a proper syncRoot
+ if _, ok := f.AppProperties["syncRoot"]; !ok {
+ return nil, fmt.Errorf("Provided id is not a sync root directory")
+ }
+
+ return f, nil
}
func (self *Drive) createMissingLocalDirs(files *syncFiles, args DownloadSyncArgs) error {
- missingDirs := files.filterMissingLocalDirs()
- missingCount := len(missingDirs)
+ missingDirs := files.filterMissingLocalDirs()
+ missingCount := len(missingDirs)
- if missingCount > 0 {
- fmt.Fprintf(args.Out, "\n%d local directories are missing\n", missingCount)
- }
+ if missingCount > 0 {
+ fmt.Fprintf(args.Out, "\n%d local directories are missing\n", missingCount)
+ }
-	// Sort directories so that the dirs with the shortest path come first
- sort.Sort(byRemotePathLength(missingDirs))
+	// Sort directories so that the dirs with the shortest path come first
+ sort.Sort(byRemotePathLength(missingDirs))
- for i, rf := range missingDirs {
- absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath))
- if err != nil {
- return fmt.Errorf("Failed to determine local absolute path: %s", err)
- }
- fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i + 1, missingCount, filepath.Join(filepath.Base(args.Path), rf.relPath))
+ for i, rf := range missingDirs {
+ absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath))
+ if err != nil {
+ return fmt.Errorf("Failed to determine local absolute path: %s", err)
+ }
+ fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i+1, missingCount, filepath.Join(filepath.Base(args.Path), rf.relPath))
- if args.DryRun {
- continue
- }
+ if args.DryRun {
+ continue
+ }
- os.MkdirAll(absPath, 0775)
- }
+ os.MkdirAll(absPath, 0775)
+ }
- return nil
+ return nil
}
func (self *Drive) downloadMissingFiles(files *syncFiles, args DownloadSyncArgs) error {
- missingFiles := files.filterMissingLocalFiles()
- missingCount := len(missingFiles)
-
- if missingCount > 0 {
- fmt.Fprintf(args.Out, "\n%d local files are missing\n", missingCount)
- }
-
- for i, rf := range missingFiles {
- absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath))
- if err != nil {
- return fmt.Errorf("Failed to determine local absolute path: %s", err)
- }
- fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, missingCount, rf.relPath, filepath.Join(filepath.Base(args.Path), rf.relPath))
-
- err = self.downloadRemoteFile(rf.file.Id, absPath, args, 0)
- if err != nil {
- return err
- }
- }
-
- return nil
+ missingFiles := files.filterMissingLocalFiles()
+ missingCount := len(missingFiles)
+
+ if missingCount > 0 {
+ fmt.Fprintf(args.Out, "\n%d local files are missing\n", missingCount)
+ }
+
+ for i, rf := range missingFiles {
+ absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath))
+ if err != nil {
+ return fmt.Errorf("Failed to determine local absolute path: %s", err)
+ }
+ fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i+1, missingCount, rf.relPath, filepath.Join(filepath.Base(args.Path), rf.relPath))
+
+ err = self.downloadRemoteFile(rf.file.Id, absPath, args, 0)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
}
func (self *Drive) downloadChangedFiles(changedFiles []*changedFile, args DownloadSyncArgs) error {
- changedCount := len(changedFiles)
-
- if changedCount > 0 {
-		fmt.Fprintf(args.Out, "\n%d remote files have changed\n", changedCount)
- }
-
- for i, cf := range changedFiles {
- if skip, reason := checkLocalConflict(cf, args.Resolution); skip {
- fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i + 1, changedCount, cf.remote.relPath, reason)
- continue
- }
-
- absPath, err := filepath.Abs(filepath.Join(args.Path, cf.remote.relPath))
- if err != nil {
- return fmt.Errorf("Failed to determine local absolute path: %s", err)
- }
- fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, changedCount, cf.remote.relPath, filepath.Join(filepath.Base(args.Path), cf.remote.relPath))
-
- err = self.downloadRemoteFile(cf.remote.file.Id, absPath, args, 0)
- if err != nil {
- return err
- }
- }
-
- return nil
+ changedCount := len(changedFiles)
+
+ if changedCount > 0 {
+		fmt.Fprintf(args.Out, "\n%d remote files have changed\n", changedCount)
+ }
+
+ for i, cf := range changedFiles {
+ if skip, reason := checkLocalConflict(cf, args.Resolution); skip {
+ fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i+1, changedCount, cf.remote.relPath, reason)
+ continue
+ }
+
+ absPath, err := filepath.Abs(filepath.Join(args.Path, cf.remote.relPath))
+ if err != nil {
+ return fmt.Errorf("Failed to determine local absolute path: %s", err)
+ }
+ fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i+1, changedCount, cf.remote.relPath, filepath.Join(filepath.Base(args.Path), cf.remote.relPath))
+
+ err = self.downloadRemoteFile(cf.remote.file.Id, absPath, args, 0)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
}
func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, try int) error {
- if args.DryRun {
- return nil
- }
-
- // Get timeout reader wrapper and context
- timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext()
-
- res, err := self.service.Files.Get(id).Context(ctx).Download()
- if err != nil {
- if isBackendError(err) && try < MaxBackendErrorRetries {
- exponentialBackoffSleep(try)
- try++
- return self.downloadRemoteFile(id, fpath, args, try)
- } else {
- return fmt.Errorf("Failed to download file: %s", err)
- }
- }
-
- // Close body on function exit
- defer res.Body.Close()
-
- // Wrap response body in progress reader
- progressReader := getProgressReader(res.Body, args.Progress, res.ContentLength)
-
- // Wrap reader in timeout reader
- reader := timeoutReaderWrapper(progressReader)
-
-	// Ensure any parent directories exist
- if err = mkdir(fpath); err != nil {
- return err
- }
-
- // Download to tmp file
- tmpPath := fpath + ".incomplete"
-
- // Create new file
- outFile, err := os.Create(tmpPath)
- if err != nil {
- return fmt.Errorf("Unable to create local file: %s", err)
- }
-
- // Save file to disk
- _, err = io.Copy(outFile, reader)
- if err != nil {
- outFile.Close()
- if try < MaxBackendErrorRetries {
- exponentialBackoffSleep(try)
- try++
- return self.downloadRemoteFile(id, fpath, args, try)
- } else {
- os.Remove(tmpPath)
- return fmt.Errorf("Download was interrupted: %s", err)
- }
- }
-
- // Close file
- outFile.Close()
-
- // Rename tmp file to proper filename
- return os.Rename(tmpPath, fpath)
+ if args.DryRun {
+ return nil
+ }
+
+ // Get timeout reader wrapper and context
+ timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext()
+
+ res, err := self.service.Files.Get(id).Context(ctx).Download()
+ if err != nil {
+ if isBackendError(err) && try < MaxBackendErrorRetries {
+ exponentialBackoffSleep(try)
+ try++
+ return self.downloadRemoteFile(id, fpath, args, try)
+ } else {
+ return fmt.Errorf("Failed to download file: %s", err)
+ }
+ }
+
+ // Close body on function exit
+ defer res.Body.Close()
+
+ // Wrap response body in progress reader
+ progressReader := getProgressReader(res.Body, args.Progress, res.ContentLength)
+
+ // Wrap reader in timeout reader
+ reader := timeoutReaderWrapper(progressReader)
+
+	// Ensure any parent directories exist
+ if err = mkdir(fpath); err != nil {
+ return err
+ }
+
+ // Download to tmp file
+ tmpPath := fpath + ".incomplete"
+
+ // Create new file
+ outFile, err := os.Create(tmpPath)
+ if err != nil {
+ return fmt.Errorf("Unable to create local file: %s", err)
+ }
+
+ // Save file to disk
+ _, err = io.Copy(outFile, reader)
+ if err != nil {
+ outFile.Close()
+ if try < MaxBackendErrorRetries {
+ exponentialBackoffSleep(try)
+ try++
+ return self.downloadRemoteFile(id, fpath, args, try)
+ } else {
+ os.Remove(tmpPath)
+ return fmt.Errorf("Download was interrupted: %s", err)
+ }
+ }
+
+ // Close file
+ outFile.Close()
+
+ // Rename tmp file to proper filename
+ return os.Rename(tmpPath, fpath)
}
func (self *Drive) deleteExtraneousLocalFiles(files *syncFiles, args DownloadSyncArgs) error {
- extraneousFiles := files.filterExtraneousLocalFiles()
- extraneousCount := len(extraneousFiles)
+ extraneousFiles := files.filterExtraneousLocalFiles()
+ extraneousCount := len(extraneousFiles)
- if extraneousCount > 0 {
- fmt.Fprintf(args.Out, "\n%d local files are extraneous\n", extraneousCount)
- }
+ if extraneousCount > 0 {
+ fmt.Fprintf(args.Out, "\n%d local files are extraneous\n", extraneousCount)
+ }
-	// Sort files so that the files with the longest path come first
- sort.Sort(sort.Reverse(byLocalPathLength(extraneousFiles)))
+	// Sort files so that the files with the longest path come first
+ sort.Sort(sort.Reverse(byLocalPathLength(extraneousFiles)))
- for i, lf := range extraneousFiles {
- fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, lf.absPath)
+ for i, lf := range extraneousFiles {
+ fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i+1, extraneousCount, lf.absPath)
- if args.DryRun {
- continue
- }
+ if args.DryRun {
+ continue
+ }
- err := os.Remove(lf.absPath)
- if err != nil {
- return fmt.Errorf("Failed to delete local file: %s", err)
- }
- }
+ err := os.Remove(lf.absPath)
+ if err != nil {
+ return fmt.Errorf("Failed to delete local file: %s", err)
+ }
+ }
- return nil
+ return nil
}
func checkLocalConflict(cf *changedFile, resolution ConflictResolution) (bool, string) {
- // No conflict unless local file was last modified
- if cf.compareModTime() != LocalLastModified {
- return false, ""
- }
-
-	// Don't skip if we want to keep the remote file
- if resolution == KeepRemote {
- return false, ""
- }
-
- // Skip if we want to keep the local file
- if resolution == KeepLocal {
- return true, "conflicting file, keeping local file"
- }
-
- if resolution == KeepLargest {
- largest := cf.compareSize()
-
- // Skip if the local file is largest
- if largest == LocalLargestSize {
- return true, "conflicting file, local file is largest, keeping local"
- }
-
- // Don't skip if the remote file is largest
- if largest == RemoteLargestSize {
- return false, ""
- }
-
- // Keep local if both files have the same size
- if largest == EqualSize {
- return true, "conflicting file, file sizes are equal, keeping local"
- }
- }
-
- // The conditionals above should cover all cases,
- // unless the programmer did something wrong,
- // in which case we default to being non-destructive and skip the file
- return true, "conflicting file, unhandled case"
+ // No conflict unless local file was last modified
+ if cf.compareModTime() != LocalLastModified {
+ return false, ""
+ }
+
+	// Don't skip if we want to keep the remote file
+ if resolution == KeepRemote {
+ return false, ""
+ }
+
+ // Skip if we want to keep the local file
+ if resolution == KeepLocal {
+ return true, "conflicting file, keeping local file"
+ }
+
+ if resolution == KeepLargest {
+ largest := cf.compareSize()
+
+ // Skip if the local file is largest
+ if largest == LocalLargestSize {
+ return true, "conflicting file, local file is largest, keeping local"
+ }
+
+ // Don't skip if the remote file is largest
+ if largest == RemoteLargestSize {
+ return false, ""
+ }
+
+ // Keep local if both files have the same size
+ if largest == EqualSize {
+ return true, "conflicting file, file sizes are equal, keeping local"
+ }
+ }
+
+ // The conditionals above should cover all cases,
+ // unless the programmer did something wrong,
+ // in which case we default to being non-destructive and skip the file
+ return true, "conflicting file, unhandled case"
}
func ensureNoLocalModifications(files []*changedFile) error {
- conflicts := findLocalConflicts(files)
- if len(conflicts) == 0 {
- return nil
- }
-
- buffer := bytes.NewBufferString("")
- formatConflicts(conflicts, buffer)
- return fmt.Errorf(buffer.String())
+ conflicts := findLocalConflicts(files)
+ if len(conflicts) == 0 {
+ return nil
+ }
+
+ buffer := bytes.NewBufferString("")
+ formatConflicts(conflicts, buffer)
+ return fmt.Errorf(buffer.String())
}
diff --git a/drive/sync_list.go b/drive/sync_list.go
index e035239..c9b84fb 100644
--- a/drive/sync_list.go
+++ b/drive/sync_list.go
@@ -1,97 +1,97 @@
package drive
import (
- "fmt"
- "sort"
- "io"
- "text/tabwriter"
- "google.golang.org/api/googleapi"
- "google.golang.org/api/drive/v3"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "google.golang.org/api/googleapi"
+ "io"
+ "sort"
+ "text/tabwriter"
)
type ListSyncArgs struct {
- Out io.Writer
- SkipHeader bool
+ Out io.Writer
+ SkipHeader bool
}
func (self *Drive) ListSync(args ListSyncArgs) error {
- listArgs := listAllFilesArgs{
- query: "appProperties has {key='syncRoot' and value='true'}",
- fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,createdTime)"},
- }
- files, err := self.listAllFiles(listArgs)
- if err != nil {
- return err
- }
- printSyncDirectories(files, args)
- return nil
+ listArgs := listAllFilesArgs{
+ query: "appProperties has {key='syncRoot' and value='true'}",
+ fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,createdTime)"},
+ }
+ files, err := self.listAllFiles(listArgs)
+ if err != nil {
+ return err
+ }
+ printSyncDirectories(files, args)
+ return nil
}
type ListRecursiveSyncArgs struct {
- Out io.Writer
- RootId string
- SkipHeader bool
- PathWidth int64
- SizeInBytes bool
- SortOrder string
+ Out io.Writer
+ RootId string
+ SkipHeader bool
+ PathWidth int64
+ SizeInBytes bool
+ SortOrder string
}
func (self *Drive) ListRecursiveSync(args ListRecursiveSyncArgs) error {
- rootDir, err := self.getSyncRoot(args.RootId)
- if err != nil {
- return err
- }
-
- files, err := self.prepareRemoteFiles(rootDir, args.SortOrder)
- if err != nil {
- return err
- }
-
- printSyncDirContent(files, args)
- return nil
+ rootDir, err := self.getSyncRoot(args.RootId)
+ if err != nil {
+ return err
+ }
+
+ files, err := self.prepareRemoteFiles(rootDir, args.SortOrder)
+ if err != nil {
+ return err
+ }
+
+ printSyncDirContent(files, args)
+ return nil
}
func printSyncDirectories(files []*drive.File, args ListSyncArgs) {
- w := new(tabwriter.Writer)
- w.Init(args.Out, 0, 0, 3, ' ', 0)
-
- if !args.SkipHeader {
- fmt.Fprintln(w, "Id\tName\tCreated")
- }
-
- for _, f := range files {
- fmt.Fprintf(w, "%s\t%s\t%s\n",
- f.Id,
- f.Name,
- formatDatetime(f.CreatedTime),
- )
- }
-
- w.Flush()
+ w := new(tabwriter.Writer)
+ w.Init(args.Out, 0, 0, 3, ' ', 0)
+
+ if !args.SkipHeader {
+ fmt.Fprintln(w, "Id\tName\tCreated")
+ }
+
+ for _, f := range files {
+ fmt.Fprintf(w, "%s\t%s\t%s\n",
+ f.Id,
+ f.Name,
+ formatDatetime(f.CreatedTime),
+ )
+ }
+
+ w.Flush()
}
func printSyncDirContent(files []*RemoteFile, args ListRecursiveSyncArgs) {
- if args.SortOrder == "" {
- // Sort files by path
- sort.Sort(byRemotePath(files))
- }
-
- w := new(tabwriter.Writer)
- w.Init(args.Out, 0, 0, 3, ' ', 0)
-
- if !args.SkipHeader {
- fmt.Fprintln(w, "Id\tPath\tType\tSize\tModified")
- }
-
- for _, rf := range files {
- fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
- rf.file.Id,
- truncateString(rf.relPath, int(args.PathWidth)),
- filetype(rf.file),
- formatSize(rf.file.Size, args.SizeInBytes),
- formatDatetime(rf.file.ModifiedTime),
- )
- }
-
- w.Flush()
+ if args.SortOrder == "" {
+ // Sort files by path
+ sort.Sort(byRemotePath(files))
+ }
+
+ w := new(tabwriter.Writer)
+ w.Init(args.Out, 0, 0, 3, ' ', 0)
+
+ if !args.SkipHeader {
+ fmt.Fprintln(w, "Id\tPath\tType\tSize\tModified")
+ }
+
+ for _, rf := range files {
+ fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
+ rf.file.Id,
+ truncateString(rf.relPath, int(args.PathWidth)),
+ filetype(rf.file),
+ formatSize(rf.file.Size, args.SizeInBytes),
+ formatDatetime(rf.file.ModifiedTime),
+ )
+ }
+
+ w.Flush()
}
diff --git a/drive/sync_upload.go b/drive/sync_upload.go
index 96442e1..0d5c208 100644
--- a/drive/sync_upload.go
+++ b/drive/sync_upload.go
@@ -1,476 +1,475 @@
package drive
import (
- "fmt"
- "io"
- "os"
- "time"
- "sort"
- "bytes"
- "path/filepath"
- "google.golang.org/api/googleapi"
- "google.golang.org/api/drive/v3"
+ "bytes"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "google.golang.org/api/googleapi"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "time"
)
type UploadSyncArgs struct {
- Out io.Writer
- Progress io.Writer
- Path string
- RootId string
- DryRun bool
- DeleteExtraneous bool
- ChunkSize int64
- Resolution ConflictResolution
- Comparer FileComparer
+ Out io.Writer
+ Progress io.Writer
+ Path string
+ RootId string
+ DryRun bool
+ DeleteExtraneous bool
+ ChunkSize int64
+ Resolution ConflictResolution
+ Comparer FileComparer
}
func (self *Drive) UploadSync(args UploadSyncArgs) error {
- if args.ChunkSize > intMax() - 1 {
-		return fmt.Errorf("Chunk size is too big, max chunk size for this computer is %d", intMax() - 1)
- }
-
- fmt.Fprintln(args.Out, "Starting sync...")
- started := time.Now()
-
- // Create root directory if it does not exist
- rootDir, err := self.prepareSyncRoot(args)
- if err != nil {
- return err
- }
-
- fmt.Fprintln(args.Out, "Collecting local and remote file information...")
- files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer)
- if err != nil {
- return err
- }
-
- // Find missing and changed files
- changedFiles := files.filterChangedLocalFiles()
- missingFiles := files.filterMissingRemoteFiles()
-
- fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote))
-
- // Ensure that there is enough free space on drive
- if ok, msg := self.checkRemoteFreeSpace(missingFiles, changedFiles); !ok {
- return fmt.Errorf(msg)
- }
-
- // Ensure that we don't overwrite any remote changes
- if args.Resolution == NoResolution {
- err = ensureNoRemoteModifications(changedFiles)
- if err != nil {
-			return fmt.Errorf("Conflict detected!\nThe following files have changed and the remote files are newer than their local counterparts:\n\n%s\nNo conflict resolution was given, aborting...", err)
- }
- }
-
- // Create missing directories
- files, err = self.createMissingRemoteDirs(files, args)
- if err != nil {
- return err
- }
-
- // Upload missing files
- err = self.uploadMissingFiles(missingFiles, files, args)
- if err != nil {
- return err
- }
-
- // Update modified files
- err = self.updateChangedFiles(changedFiles, rootDir, args)
- if err != nil {
- return err
- }
-
- // Delete extraneous files on drive
- if args.DeleteExtraneous {
- err = self.deleteExtraneousRemoteFiles(files, args)
- if err != nil {
- return err
- }
- }
- fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started))
-
- return nil
+ if args.ChunkSize > intMax()-1 {
+		return fmt.Errorf("Chunk size is too big, max chunk size for this computer is %d", intMax()-1)
+ }
+
+ fmt.Fprintln(args.Out, "Starting sync...")
+ started := time.Now()
+
+ // Create root directory if it does not exist
+ rootDir, err := self.prepareSyncRoot(args)
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintln(args.Out, "Collecting local and remote file information...")
+ files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer)
+ if err != nil {
+ return err
+ }
+
+ // Find missing and changed files
+ changedFiles := files.filterChangedLocalFiles()
+ missingFiles := files.filterMissingRemoteFiles()
+
+ fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote))
+
+ // Ensure that there is enough free space on drive
+ if ok, msg := self.checkRemoteFreeSpace(missingFiles, changedFiles); !ok {
+ return fmt.Errorf(msg)
+ }
+
+ // Ensure that we don't overwrite any remote changes
+ if args.Resolution == NoResolution {
+ err = ensureNoRemoteModifications(changedFiles)
+ if err != nil {
+			return fmt.Errorf("Conflict detected!\nThe following files have changed and the remote files are newer than their local counterparts:\n\n%s\nNo conflict resolution was given, aborting...", err)
+ }
+ }
+
+ // Create missing directories
+ files, err = self.createMissingRemoteDirs(files, args)
+ if err != nil {
+ return err
+ }
+
+ // Upload missing files
+ err = self.uploadMissingFiles(missingFiles, files, args)
+ if err != nil {
+ return err
+ }
+
+ // Update modified files
+ err = self.updateChangedFiles(changedFiles, rootDir, args)
+ if err != nil {
+ return err
+ }
+
+ // Delete extraneous files on drive
+ if args.DeleteExtraneous {
+ err = self.deleteExtraneousRemoteFiles(files, args)
+ if err != nil {
+ return err
+ }
+ }
+ fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started))
+
+ return nil
}
func (self *Drive) prepareSyncRoot(args UploadSyncArgs) (*drive.File, error) {
- fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"}
- f, err := self.service.Files.Get(args.RootId).Fields(fields...).Do()
- if err != nil {
- return nil, fmt.Errorf("Failed to find root dir: %s", err)
- }
-
- // Ensure file is a directory
- if !isDir(f) {
- return nil, fmt.Errorf("Provided root id is not a directory")
- }
-
- // Return directory if syncRoot property is already set
- if _, ok := f.AppProperties["syncRoot"]; ok {
- return f, nil
- }
-
-	// This is the first time this directory has been used for sync
- // Check if the directory is empty
- isEmpty, err := self.dirIsEmpty(f.Id)
- if err != nil {
- return nil, fmt.Errorf("Failed to check if root dir is empty: %s", err)
- }
-
- // Ensure that the directory is empty
- if !isEmpty {
-		return nil, fmt.Errorf("Root directory is not empty, the initial sync requires an empty directory")
- }
-
- // Update directory with syncRoot property
- dstFile := &drive.File{
- AppProperties: map[string]string{"sync": "true", "syncRoot": "true"},
- }
-
- f, err = self.service.Files.Update(f.Id, dstFile).Fields(fields...).Do()
- if err != nil {
- return nil, fmt.Errorf("Failed to update root directory: %s", err)
- }
-
- return f, nil
+ fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"}
+ f, err := self.service.Files.Get(args.RootId).Fields(fields...).Do()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to find root dir: %s", err)
+ }
+
+ // Ensure file is a directory
+ if !isDir(f) {
+ return nil, fmt.Errorf("Provided root id is not a directory")
+ }
+
+ // Return directory if syncRoot property is already set
+ if _, ok := f.AppProperties["syncRoot"]; ok {
+ return f, nil
+ }
+
+	// This is the first time this directory has been used for sync
+ // Check if the directory is empty
+ isEmpty, err := self.dirIsEmpty(f.Id)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to check if root dir is empty: %s", err)
+ }
+
+ // Ensure that the directory is empty
+ if !isEmpty {
+		return nil, fmt.Errorf("Root directory is not empty, the initial sync requires an empty directory")
+ }
+
+ // Update directory with syncRoot property
+ dstFile := &drive.File{
+ AppProperties: map[string]string{"sync": "true", "syncRoot": "true"},
+ }
+
+ f, err = self.service.Files.Update(f.Id, dstFile).Fields(fields...).Do()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to update root directory: %s", err)
+ }
+
+ return f, nil
}
func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs) (*syncFiles, error) {
- missingDirs := files.filterMissingRemoteDirs()
- missingCount := len(missingDirs)
-
- if missingCount > 0 {
- fmt.Fprintf(args.Out, "\n%d remote directories are missing\n", missingCount)
- }
-
-	// Sort directories so that the dirs with the shortest path come first
- sort.Sort(byLocalPathLength(missingDirs))
-
- for i, lf := range missingDirs {
- parentPath := parentFilePath(lf.relPath)
- parent, ok := files.findRemoteByPath(parentPath)
- if !ok {
- return nil, fmt.Errorf("Could not find remote directory with path '%s'", parentPath)
- }
-
- fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i + 1, missingCount, filepath.Join(files.root.file.Name, lf.relPath))
-
- f, err := self.createMissingRemoteDir(createMissingRemoteDirArgs{
- name: lf.info.Name(),
- parentId: parent.file.Id,
- rootId: args.RootId,
- dryRun: args.DryRun,
- try: 0,
- })
- if err != nil {
- return nil, err
- }
-
- files.remote = append(files.remote, &RemoteFile{
- relPath: lf.relPath,
- file: f,
- })
- }
-
- return files, nil
+ missingDirs := files.filterMissingRemoteDirs()
+ missingCount := len(missingDirs)
+
+ if missingCount > 0 {
+ fmt.Fprintf(args.Out, "\n%d remote directories are missing\n", missingCount)
+ }
+
+	// Sort directories so that the dirs with the shortest path come first
+ sort.Sort(byLocalPathLength(missingDirs))
+
+ for i, lf := range missingDirs {
+ parentPath := parentFilePath(lf.relPath)
+ parent, ok := files.findRemoteByPath(parentPath)
+ if !ok {
+ return nil, fmt.Errorf("Could not find remote directory with path '%s'", parentPath)
+ }
+
+ fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i+1, missingCount, filepath.Join(files.root.file.Name, lf.relPath))
+
+ f, err := self.createMissingRemoteDir(createMissingRemoteDirArgs{
+ name: lf.info.Name(),
+ parentId: parent.file.Id,
+ rootId: args.RootId,
+ dryRun: args.DryRun,
+ try: 0,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ files.remote = append(files.remote, &RemoteFile{
+ relPath: lf.relPath,
+ file: f,
+ })
+ }
+
+ return files, nil
}
type createMissingRemoteDirArgs struct {
- name string
- parentId string
- rootId string
- dryRun bool
- try int
+ name string
+ parentId string
+ rootId string
+ dryRun bool
+ try int
}
func (self *Drive) uploadMissingFiles(missingFiles []*LocalFile, files *syncFiles, args UploadSyncArgs) error {
- missingCount := len(missingFiles)
+ missingCount := len(missingFiles)
- if missingCount > 0 {
- fmt.Fprintf(args.Out, "\n%d remote files are missing\n", missingCount)
- }
+ if missingCount > 0 {
+ fmt.Fprintf(args.Out, "\n%d remote files are missing\n", missingCount)
+ }
- for i, lf := range missingFiles {
- parentPath := parentFilePath(lf.relPath)
- parent, ok := files.findRemoteByPath(parentPath)
- if !ok {
- return fmt.Errorf("Could not find remote directory with path '%s'", parentPath)
- }
+ for i, lf := range missingFiles {
+ parentPath := parentFilePath(lf.relPath)
+ parent, ok := files.findRemoteByPath(parentPath)
+ if !ok {
+ return fmt.Errorf("Could not find remote directory with path '%s'", parentPath)
+ }
- fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i + 1, missingCount, lf.relPath, filepath.Join(files.root.file.Name, lf.relPath))
+ fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i+1, missingCount, lf.relPath, filepath.Join(files.root.file.Name, lf.relPath))
- err := self.uploadMissingFile(parent.file.Id, lf, args, 0)
- if err != nil {
- return err
- }
- }
+ err := self.uploadMissingFile(parent.file.Id, lf, args, 0)
+ if err != nil {
+ return err
+ }
+ }
- return nil
+ return nil
}
func (self *Drive) updateChangedFiles(changedFiles []*changedFile, root *drive.File, args UploadSyncArgs) error {
- changedCount := len(changedFiles)
+ changedCount := len(changedFiles)
- if changedCount > 0 {
-		fmt.Fprintf(args.Out, "\n%d local files have changed\n", changedCount)
- }
+ if changedCount > 0 {
+ fmt.Fprintf(args.Out, "\n%d local files has changed\n", changedCount)
+		fmt.Fprintf(args.Out, "\n%d local files have changed\n", changedCount)
- for i, cf := range changedFiles {
- if skip, reason := checkRemoteConflict(cf, args.Resolution); skip {
- fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i + 1, changedCount, cf.local.relPath, reason)
- continue
- }
+ for i, cf := range changedFiles {
+ if skip, reason := checkRemoteConflict(cf, args.Resolution); skip {
+ fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i+1, changedCount, cf.local.relPath, reason)
+ continue
+ }
- fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i + 1, changedCount, cf.local.relPath, filepath.Join(root.Name, cf.local.relPath))
+ fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i+1, changedCount, cf.local.relPath, filepath.Join(root.Name, cf.local.relPath))
- err := self.updateChangedFile(cf, args, 0)
- if err != nil {
- return err
- }
- }
+ err := self.updateChangedFile(cf, args, 0)
+ if err != nil {
+ return err
+ }
+ }
- return nil
+ return nil
}
func (self *Drive) deleteExtraneousRemoteFiles(files *syncFiles, args UploadSyncArgs) error {
- extraneousFiles := files.filterExtraneousRemoteFiles()
- extraneousCount := len(extraneousFiles)
+ extraneousFiles := files.filterExtraneousRemoteFiles()
+ extraneousCount := len(extraneousFiles)
- if extraneousCount > 0 {
- fmt.Fprintf(args.Out, "\n%d remote files are extraneous\n", extraneousCount)
- }
+ if extraneousCount > 0 {
+ fmt.Fprintf(args.Out, "\n%d remote files are extraneous\n", extraneousCount)
+ }
-	// Sort files so that the files with the longest path come first
- sort.Sort(sort.Reverse(byRemotePathLength(extraneousFiles)))
+	// Sort files so that the files with the longest path come first
+ sort.Sort(sort.Reverse(byRemotePathLength(extraneousFiles)))
- for i, rf := range extraneousFiles {
- fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, filepath.Join(files.root.file.Name, rf.relPath))
+ for i, rf := range extraneousFiles {
+ fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i+1, extraneousCount, filepath.Join(files.root.file.Name, rf.relPath))
- err := self.deleteRemoteFile(rf, args, 0)
- if err != nil {
- return err
- }
- }
+ err := self.deleteRemoteFile(rf, args, 0)
+ if err != nil {
+ return err
+ }
+ }
- return nil
+ return nil
}
func (self *Drive) createMissingRemoteDir(args createMissingRemoteDirArgs) (*drive.File, error) {
- dstFile := &drive.File{
- Name: args.name,
- MimeType: DirectoryMimeType,
- Parents: []string{args.parentId},
- AppProperties: map[string]string{"sync": "true", "syncRootId": args.rootId},
- }
-
- if args.dryRun {
- return dstFile, nil
- }
-
- f, err := self.service.Files.Create(dstFile).Do()
- if err != nil {
- if isBackendError(err) && args.try < MaxBackendErrorRetries {
- exponentialBackoffSleep(args.try)
- args.try++
- return self.createMissingRemoteDir(args)
- } else {
- return nil, fmt.Errorf("Failed to create directory: %s", err)
- }
- }
-
- return f, nil
+ dstFile := &drive.File{
+ Name: args.name,
+ MimeType: DirectoryMimeType,
+ Parents: []string{args.parentId},
+ AppProperties: map[string]string{"sync": "true", "syncRootId": args.rootId},
+ }
+
+ if args.dryRun {
+ return dstFile, nil
+ }
+
+ f, err := self.service.Files.Create(dstFile).Do()
+ if err != nil {
+ if isBackendError(err) && args.try < MaxBackendErrorRetries {
+ exponentialBackoffSleep(args.try)
+ args.try++
+ return self.createMissingRemoteDir(args)
+ } else {
+ return nil, fmt.Errorf("Failed to create directory: %s", err)
+ }
+ }
+
+ return f, nil
}
func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args UploadSyncArgs, try int) error {
- if args.DryRun {
- return nil
- }
-
- srcFile, err := os.Open(lf.absPath)
- if err != nil {
- return fmt.Errorf("Failed to open file: %s", err)
- }
-
- // Close file on function exit
- defer srcFile.Close()
-
- // Instantiate drive file
- dstFile := &drive.File{
- Name: lf.info.Name(),
- Parents: []string{parentId},
- AppProperties: map[string]string{"sync": "true", "syncRootId": args.RootId},
- }
-
- // Chunk size option
- chunkSize := googleapi.ChunkSize(int(args.ChunkSize))
-
- // Wrap file in progress reader
- progressReader := getProgressReader(srcFile, args.Progress, lf.info.Size())
-
- // Wrap reader in timeout reader
- reader, ctx := getTimeoutReaderContext(progressReader)
-
- _, err = self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Context(ctx).Media(reader, chunkSize).Do()
- if err != nil {
- if isBackendError(err) && try < MaxBackendErrorRetries {
- exponentialBackoffSleep(try)
- try++
- return self.uploadMissingFile(parentId, lf, args, try)
- } else {
- return fmt.Errorf("Failed to upload file: %s", err)
- }
- }
-
- return nil
+ if args.DryRun {
+ return nil
+ }
+
+ srcFile, err := os.Open(lf.absPath)
+ if err != nil {
+ return fmt.Errorf("Failed to open file: %s", err)
+ }
+
+ // Close file on function exit
+ defer srcFile.Close()
+
+ // Instantiate drive file
+ dstFile := &drive.File{
+ Name: lf.info.Name(),
+ Parents: []string{parentId},
+ AppProperties: map[string]string{"sync": "true", "syncRootId": args.RootId},
+ }
+
+ // Chunk size option
+ chunkSize := googleapi.ChunkSize(int(args.ChunkSize))
+
+ // Wrap file in progress reader
+ progressReader := getProgressReader(srcFile, args.Progress, lf.info.Size())
+
+ // Wrap reader in timeout reader
+ reader, ctx := getTimeoutReaderContext(progressReader)
+
+ _, err = self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Context(ctx).Media(reader, chunkSize).Do()
+ if err != nil {
+ if isBackendError(err) && try < MaxBackendErrorRetries {
+ exponentialBackoffSleep(try)
+ try++
+ return self.uploadMissingFile(parentId, lf, args, try)
+ } else {
+ return fmt.Errorf("Failed to upload file: %s", err)
+ }
+ }
+
+ return nil
}
func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs, try int) error {
- if args.DryRun {
- return nil
- }
-
- srcFile, err := os.Open(cf.local.absPath)
- if err != nil {
- return fmt.Errorf("Failed to open file: %s", err)
- }
-
- // Close file on function exit
- defer srcFile.Close()
-
- // Instantiate drive file
- dstFile := &drive.File{}
-
- // Chunk size option
- chunkSize := googleapi.ChunkSize(int(args.ChunkSize))
-
- // Wrap file in progress reader
- progressReader := getProgressReader(srcFile, args.Progress, cf.local.info.Size())
-
- // Wrap reader in timeout reader
- reader, ctx := getTimeoutReaderContext(progressReader)
-
- _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Context(ctx).Media(reader, chunkSize).Do()
- if err != nil {
- if isBackendError(err) && try < MaxBackendErrorRetries {
- exponentialBackoffSleep(try)
- try++
- return self.updateChangedFile(cf, args, try)
- } else {
- return fmt.Errorf("Failed to update file: %s", err)
- }
- }
-
- return nil
+ if args.DryRun {
+ return nil
+ }
+
+ srcFile, err := os.Open(cf.local.absPath)
+ if err != nil {
+ return fmt.Errorf("Failed to open file: %s", err)
+ }
+
+ // Close file on function exit
+ defer srcFile.Close()
+
+ // Instantiate drive file
+ dstFile := &drive.File{}
+
+ // Chunk size option
+ chunkSize := googleapi.ChunkSize(int(args.ChunkSize))
+
+ // Wrap file in progress reader
+ progressReader := getProgressReader(srcFile, args.Progress, cf.local.info.Size())
+
+ // Wrap reader in timeout reader
+ reader, ctx := getTimeoutReaderContext(progressReader)
+
+ _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Context(ctx).Media(reader, chunkSize).Do()
+ if err != nil {
+ if isBackendError(err) && try < MaxBackendErrorRetries {
+ exponentialBackoffSleep(try)
+ try++
+ return self.updateChangedFile(cf, args, try)
+ } else {
+ return fmt.Errorf("Failed to update file: %s", err)
+ }
+ }
+
+ return nil
}
func (self *Drive) deleteRemoteFile(rf *RemoteFile, args UploadSyncArgs, try int) error {
- if args.DryRun {
- return nil
- }
-
-
- err := self.service.Files.Delete(rf.file.Id).Do()
- if err != nil {
- if isBackendError(err) && try < MaxBackendErrorRetries {
- exponentialBackoffSleep(try)
- try++
- return self.deleteRemoteFile(rf, args, try)
- } else {
- return fmt.Errorf("Failed to delete file: %s", err)
- }
- }
-
- return nil
+ if args.DryRun {
+ return nil
+ }
+
+ err := self.service.Files.Delete(rf.file.Id).Do()
+ if err != nil {
+ if isBackendError(err) && try < MaxBackendErrorRetries {
+ exponentialBackoffSleep(try)
+ try++
+ return self.deleteRemoteFile(rf, args, try)
+ } else {
+ return fmt.Errorf("Failed to delete file: %s", err)
+ }
+ }
+
+ return nil
}
func (self *Drive) dirIsEmpty(id string) (bool, error) {
- query := fmt.Sprintf("'%s' in parents", id)
- fileList, err := self.service.Files.List().Q(query).Do()
- if err != nil {
-		return false, fmt.Errorf("Empty dir check failed: %s", err)
- }
+ query := fmt.Sprintf("'%s' in parents", id)
+ fileList, err := self.service.Files.List().Q(query).Do()
+ if err != nil {
+		return false, fmt.Errorf("Empty dir check failed: %s", err)
+ }
- return len(fileList.Files) == 0, nil
+ return len(fileList.Files) == 0, nil
}
func checkRemoteConflict(cf *changedFile, resolution ConflictResolution) (bool, string) {
- // No conflict unless remote file was last modified
- if cf.compareModTime() != RemoteLastModified {
- return false, ""
- }
-
-	// Don't skip if we want to keep the local file
- if resolution == KeepLocal {
- return false, ""
- }
-
- // Skip if we want to keep the remote file
- if resolution == KeepRemote {
- return true, "conflicting file, keeping remote file"
- }
-
- if resolution == KeepLargest {
- largest := cf.compareSize()
-
- // Skip if the remote file is largest
- if largest == RemoteLargestSize {
- return true, "conflicting file, remote file is largest, keeping remote"
- }
-
- // Don't skip if the local file is largest
- if largest == LocalLargestSize {
- return false, ""
- }
-
- // Keep remote if both files have the same size
- if largest == EqualSize {
- return true, "conflicting file, file sizes are equal, keeping remote"
- }
- }
-
- // The conditionals above should cover all cases,
- // unless the programmer did something wrong,
- // in which case we default to being non-destructive and skip the file
- return true, "conflicting file, unhandled case"
+ // No conflict unless remote file was last modified
+ if cf.compareModTime() != RemoteLastModified {
+ return false, ""
+ }
+
+	// Don't skip if we want to keep the local file
+ if resolution == KeepLocal {
+ return false, ""
+ }
+
+ // Skip if we want to keep the remote file
+ if resolution == KeepRemote {
+ return true, "conflicting file, keeping remote file"
+ }
+
+ if resolution == KeepLargest {
+ largest := cf.compareSize()
+
+ // Skip if the remote file is largest
+ if largest == RemoteLargestSize {
+ return true, "conflicting file, remote file is largest, keeping remote"
+ }
+
+ // Don't skip if the local file is largest
+ if largest == LocalLargestSize {
+ return false, ""
+ }
+
+ // Keep remote if both files have the same size
+ if largest == EqualSize {
+ return true, "conflicting file, file sizes are equal, keeping remote"
+ }
+ }
+
+ // The conditionals above should cover all cases,
+ // unless the programmer did something wrong,
+ // in which case we default to being non-destructive and skip the file
+ return true, "conflicting file, unhandled case"
}
func ensureNoRemoteModifications(files []*changedFile) error {
- conflicts := findRemoteConflicts(files)
- if len(conflicts) == 0 {
- return nil
- }
-
- buffer := bytes.NewBufferString("")
- formatConflicts(conflicts, buffer)
- return fmt.Errorf(buffer.String())
+ conflicts := findRemoteConflicts(files)
+ if len(conflicts) == 0 {
+ return nil
+ }
+
+ buffer := bytes.NewBufferString("")
+ formatConflicts(conflicts, buffer)
+ return fmt.Errorf(buffer.String())
}
func (self *Drive) checkRemoteFreeSpace(missingFiles []*LocalFile, changedFiles []*changedFile) (bool, string) {
- about, err := self.service.About.Get().Fields("storageQuota").Do()
- if err != nil {
- return false, fmt.Sprintf("Failed to determine free space: %s", err)
- }
+ about, err := self.service.About.Get().Fields("storageQuota").Do()
+ if err != nil {
+ return false, fmt.Sprintf("Failed to determine free space: %s", err)
+ }
- quota := about.StorageQuota
- if quota.Limit == 0 {
- return true, ""
- }
+ quota := about.StorageQuota
+ if quota.Limit == 0 {
+ return true, ""
+ }
- freeSpace := quota.Limit - quota.Usage
+ freeSpace := quota.Limit - quota.Usage
- var totalSize int64
+ var totalSize int64
- for _, lf := range missingFiles {
- totalSize += lf.Size()
- }
+ for _, lf := range missingFiles {
+ totalSize += lf.Size()
+ }
- for _, cf := range changedFiles {
- totalSize += cf.local.Size()
- }
+ for _, cf := range changedFiles {
+ totalSize += cf.local.Size()
+ }
- if totalSize > freeSpace {
- return false, fmt.Sprintf("Not enough free space, have %s need %s", formatSize(freeSpace, false), formatSize(totalSize, false))
- }
+ if totalSize > freeSpace {
+ return false, fmt.Sprintf("Not enough free space, have %s need %s", formatSize(freeSpace, false), formatSize(totalSize, false))
+ }
- return true, ""
+ return true, ""
}
diff --git a/drive/timeout_reader.go b/drive/timeout_reader.go
index 878911b..9930c12 100644
--- a/drive/timeout_reader.go
+++ b/drive/timeout_reader.go
@@ -1,10 +1,10 @@
package drive
import (
- "io"
- "time"
- "sync"
- "golang.org/x/net/context"
+ "golang.org/x/net/context"
+ "io"
+ "sync"
+ "time"
)
const MaxIdleTimeout = time.Second * 120
@@ -13,89 +13,89 @@ const TimeoutTimerInterval = time.Second * 10
type timeoutReaderWrapper func(io.Reader) io.Reader
func getTimeoutReaderWrapperContext() (timeoutReaderWrapper, context.Context) {
- ctx, cancel := context.WithCancel(context.TODO())
- wrapper := func(r io.Reader) io.Reader {
- return getTimeoutReader(r, cancel)
- }
- return wrapper, ctx
+ ctx, cancel := context.WithCancel(context.TODO())
+ wrapper := func(r io.Reader) io.Reader {
+ return getTimeoutReader(r, cancel)
+ }
+ return wrapper, ctx
}
func getTimeoutReaderContext(r io.Reader) (io.Reader, context.Context) {
- ctx, cancel := context.WithCancel(context.TODO())
- return getTimeoutReader(r, cancel), ctx
+ ctx, cancel := context.WithCancel(context.TODO())
+ return getTimeoutReader(r, cancel), ctx
}
func getTimeoutReader(r io.Reader, cancel context.CancelFunc) io.Reader {
- return &TimeoutReader{
- reader: r,
- cancel: cancel,
- mutex: &sync.Mutex{},
- }
+ return &TimeoutReader{
+ reader: r,
+ cancel: cancel,
+ mutex: &sync.Mutex{},
+ }
}
type TimeoutReader struct {
- reader io.Reader
- cancel context.CancelFunc
- lastActivity time.Time
- timer *time.Timer
- mutex *sync.Mutex
- done bool
+ reader io.Reader
+ cancel context.CancelFunc
+ lastActivity time.Time
+ timer *time.Timer
+ mutex *sync.Mutex
+ done bool
}
func (self *TimeoutReader) Read(p []byte) (int, error) {
- if self.timer == nil {
- self.startTimer()
- }
+ if self.timer == nil {
+ self.startTimer()
+ }
- self.mutex.Lock()
+ self.mutex.Lock()
- // Read
- n, err := self.reader.Read(p)
+ // Read
+ n, err := self.reader.Read(p)
- self.lastActivity = time.Now()
- self.done = (err != nil)
+ self.lastActivity = time.Now()
+ self.done = (err != nil)
- self.mutex.Unlock()
+ self.mutex.Unlock()
- if self.done {
- self.stopTimer()
- }
+ if self.done {
+ self.stopTimer()
+ }
- return n, err
+ return n, err
}
func (self *TimeoutReader) startTimer() {
- self.mutex.Lock()
- defer self.mutex.Unlock()
+ self.mutex.Lock()
+ defer self.mutex.Unlock()
- if !self.done {
- self.timer = time.AfterFunc(TimeoutTimerInterval, self.timeout)
- }
+ if !self.done {
+ self.timer = time.AfterFunc(TimeoutTimerInterval, self.timeout)
+ }
}
func (self *TimeoutReader) stopTimer() {
- self.mutex.Lock()
- defer self.mutex.Unlock()
+ self.mutex.Lock()
+ defer self.mutex.Unlock()
- if self.timer != nil {
- self.timer.Stop()
- }
+ if self.timer != nil {
+ self.timer.Stop()
+ }
}
func (self *TimeoutReader) timeout() {
- self.mutex.Lock()
+ self.mutex.Lock()
- if self.done {
- self.mutex.Unlock()
- return
- }
+ if self.done {
+ self.mutex.Unlock()
+ return
+ }
- if time.Since(self.lastActivity) > MaxIdleTimeout {
- self.cancel()
- self.mutex.Unlock()
- return
- }
+ if time.Since(self.lastActivity) > MaxIdleTimeout {
+ self.cancel()
+ self.mutex.Unlock()
+ return
+ }
- self.mutex.Unlock()
- self.startTimer()
+ self.mutex.Unlock()
+ self.startTimer()
}
diff --git a/drive/update.go b/drive/update.go
index 5bdd040..156eb2f 100644
--- a/drive/update.go
+++ b/drive/update.go
@@ -1,75 +1,75 @@
package drive
import (
- "fmt"
- "mime"
- "time"
- "io"
- "path/filepath"
- "google.golang.org/api/googleapi"
- "google.golang.org/api/drive/v3"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "google.golang.org/api/googleapi"
+ "io"
+ "mime"
+ "path/filepath"
+ "time"
)
type UpdateArgs struct {
- Out io.Writer
- Progress io.Writer
- Id string
- Path string
- Name string
- Parents []string
- Mime string
- Recursive bool
- ChunkSize int64
+ Out io.Writer
+ Progress io.Writer
+ Id string
+ Path string
+ Name string
+ Parents []string
+ Mime string
+ Recursive bool
+ ChunkSize int64
}
func (self *Drive) Update(args UpdateArgs) error {
- srcFile, srcFileInfo, err := openFile(args.Path)
- if err != nil {
- return fmt.Errorf("Failed to open file: %s", err)
- }
+ srcFile, srcFileInfo, err := openFile(args.Path)
+ if err != nil {
+ return fmt.Errorf("Failed to open file: %s", err)
+ }
- defer srcFile.Close()
+ defer srcFile.Close()
- // Instantiate empty drive file
- dstFile := &drive.File{}
+ // Instantiate empty drive file
+ dstFile := &drive.File{}
-	// Use provided file name or fall back to the source filename
- if args.Name == "" {
- dstFile.Name = filepath.Base(srcFileInfo.Name())
- } else {
- dstFile.Name = args.Name
- }
+	// Use provided file name or fall back to the source filename
+ if args.Name == "" {
+ dstFile.Name = filepath.Base(srcFileInfo.Name())
+ } else {
+ dstFile.Name = args.Name
+ }
- // Set provided mime type or get type based on file extension
- if args.Mime == "" {
- dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name))
- } else {
- dstFile.MimeType = args.Mime
- }
+ // Set provided mime type or get type based on file extension
+ if args.Mime == "" {
+ dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name))
+ } else {
+ dstFile.MimeType = args.Mime
+ }
- // Set parent folders
- dstFile.Parents = args.Parents
+ // Set parent folders
+ dstFile.Parents = args.Parents
- // Chunk size option
- chunkSize := googleapi.ChunkSize(int(args.ChunkSize))
+ // Chunk size option
+ chunkSize := googleapi.ChunkSize(int(args.ChunkSize))
- // Wrap file in progress reader
- progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size())
+ // Wrap file in progress reader
+ progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size())
- // Wrap reader in timeout reader
- reader, ctx := getTimeoutReaderContext(progressReader)
+ // Wrap reader in timeout reader
+ reader, ctx := getTimeoutReaderContext(progressReader)
- fmt.Fprintf(args.Out, "Uploading %s\n", args.Path)
- started := time.Now()
+ fmt.Fprintf(args.Out, "Uploading %s\n", args.Path)
+ started := time.Now()
- f, err := self.service.Files.Update(args.Id, dstFile).Fields("id", "name", "size").Context(ctx).Media(reader, chunkSize).Do()
- if err != nil {
- return fmt.Errorf("Failed to upload file: %s", err)
- }
+ f, err := self.service.Files.Update(args.Id, dstFile).Fields("id", "name", "size").Context(ctx).Media(reader, chunkSize).Do()
+ if err != nil {
+ return fmt.Errorf("Failed to upload file: %s", err)
+ }
- // Calculate average upload rate
- rate := calcRate(f.Size, started, time.Now())
+ // Calculate average upload rate
+ rate := calcRate(f.Size, started, time.Now())
- fmt.Fprintf(args.Out, "Updated %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false))
- return nil
+ fmt.Fprintf(args.Out, "Updated %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false))
+ return nil
}
diff --git a/drive/upload.go b/drive/upload.go
index 0bbc014..c42bebd 100644
--- a/drive/upload.go
+++ b/drive/upload.go
@@ -1,249 +1,249 @@
package drive
import (
- "fmt"
- "mime"
- "os"
- "io"
- "time"
- "path/filepath"
- "google.golang.org/api/googleapi"
- "google.golang.org/api/drive/v3"
+ "fmt"
+ "google.golang.org/api/drive/v3"
+ "google.golang.org/api/googleapi"
+ "io"
+ "mime"
+ "os"
+ "path/filepath"
+ "time"
)
type UploadArgs struct {
- Out io.Writer
- Progress io.Writer
- Path string
- Name string
- Parents []string
- Mime string
- Recursive bool
- Share bool
- Delete bool
- ChunkSize int64
+ Out io.Writer
+ Progress io.Writer
+ Path string
+ Name string
+ Parents []string
+ Mime string
+ Recursive bool
+ Share bool
+ Delete bool
+ ChunkSize int64
}
func (self *Drive) Upload(args UploadArgs) error {
- if args.ChunkSize > intMax() - 1 {
-		return fmt.Errorf("Chunk size is too big, max chunk size for this computer is %d", intMax() - 1)
- }
-
- // Ensure that none of the parents are sync dirs
- for _, parent := range args.Parents {
- isSyncDir, err := self.isSyncFile(parent)
- if err != nil {
- return err
- }
-
- if isSyncDir {
- return fmt.Errorf("%s is a sync directory, use 'sync upload' instead", parent)
- }
- }
-
- if args.Recursive {
- return self.uploadRecursive(args)
- }
-
- info, err := os.Stat(args.Path)
- if err != nil {
-		return fmt.Errorf("Failed to stat file: %s", err)
- }
-
- if info.IsDir() {
- return fmt.Errorf("'%s' is a directory, use --recursive to upload directories", info.Name())
- }
-
- f, rate, err := self.uploadFile(args)
- if err != nil {
- return err
- }
- fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false))
-
- if args.Share {
- err = self.shareAnyoneReader(f.Id)
- if err != nil {
- return err
- }
-
- fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink)
- }
-
- if args.Delete {
- err = os.Remove(args.Path)
- if err != nil {
- return fmt.Errorf("Failed to delete file: %s", err)
- }
- fmt.Fprintf(args.Out, "Removed %s\n", args.Path)
- }
-
- return nil
+ if args.ChunkSize > intMax()-1 {
+		return fmt.Errorf("Chunk size is too big, max chunk size for this computer is %d", intMax()-1)
+ }
+
+ // Ensure that none of the parents are sync dirs
+ for _, parent := range args.Parents {
+ isSyncDir, err := self.isSyncFile(parent)
+ if err != nil {
+ return err
+ }
+
+ if isSyncDir {
+ return fmt.Errorf("%s is a sync directory, use 'sync upload' instead", parent)
+ }
+ }
+
+ if args.Recursive {
+ return self.uploadRecursive(args)
+ }
+
+ info, err := os.Stat(args.Path)
+ if err != nil {
+		return fmt.Errorf("Failed to stat file: %s", err)
+ }
+
+ if info.IsDir() {
+ return fmt.Errorf("'%s' is a directory, use --recursive to upload directories", info.Name())
+ }
+
+ f, rate, err := self.uploadFile(args)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false))
+
+ if args.Share {
+ err = self.shareAnyoneReader(f.Id)
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink)
+ }
+
+ if args.Delete {
+ err = os.Remove(args.Path)
+ if err != nil {
+ return fmt.Errorf("Failed to delete file: %s", err)
+ }
+ fmt.Fprintf(args.Out, "Removed %s\n", args.Path)
+ }
+
+ return nil
}
func (self *Drive) uploadRecursive(args UploadArgs) error {
- info, err := os.Stat(args.Path)
- if err != nil {
-		return fmt.Errorf("Failed to stat file: %s", err)
- }
-
- if info.IsDir() {
- args.Name = ""
- return self.uploadDirectory(args)
- } else {
- _, _, err := self.uploadFile(args)
- return err
- }
+ info, err := os.Stat(args.Path)
+ if err != nil {
+		return fmt.Errorf("Failed to stat file: %s", err)
+ }
+
+ if info.IsDir() {
+ args.Name = ""
+ return self.uploadDirectory(args)
+ } else {
+ _, _, err := self.uploadFile(args)
+ return err
+ }
}
func (self *Drive) uploadDirectory(args UploadArgs) error {
- srcFile, srcFileInfo, err := openFile(args.Path)
- if err != nil {
- return err
- }
-
- // Close file on function exit
- defer srcFile.Close()
-
- fmt.Fprintf(args.Out, "Creating directory %s\n", srcFileInfo.Name())
- // Make directory on drive
- f, err := self.mkdir(MkdirArgs{
- Out: args.Out,
- Name: srcFileInfo.Name(),
- Parents: args.Parents,
- })
- if err != nil {
- return err
- }
-
- // Read files from directory
- names, err := srcFile.Readdirnames(0)
- if err != nil && err != io.EOF {
- return fmt.Errorf("Failed reading directory: %s", err)
- }
-
- for _, name := range names {
- // Copy args and set new path and parents
- newArgs := args
- newArgs.Path = filepath.Join(args.Path, name)
- newArgs.Parents = []string{f.Id}
-
- // Upload
- err = self.uploadRecursive(newArgs)
- if err != nil {
- return err
- }
- }
-
- return nil
+ srcFile, srcFileInfo, err := openFile(args.Path)
+ if err != nil {
+ return err
+ }
+
+ // Close file on function exit
+ defer srcFile.Close()
+
+ fmt.Fprintf(args.Out, "Creating directory %s\n", srcFileInfo.Name())
+ // Make directory on drive
+ f, err := self.mkdir(MkdirArgs{
+ Out: args.Out,
+ Name: srcFileInfo.Name(),
+ Parents: args.Parents,
+ })
+ if err != nil {
+ return err
+ }
+
+ // Read files from directory
+ names, err := srcFile.Readdirnames(0)
+ if err != nil && err != io.EOF {
+ return fmt.Errorf("Failed reading directory: %s", err)
+ }
+
+ for _, name := range names {
+ // Copy args and set new path and parents
+ newArgs := args
+ newArgs.Path = filepath.Join(args.Path, name)
+ newArgs.Parents = []string{f.Id}
+
+ // Upload
+ err = self.uploadRecursive(newArgs)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
}
func (self *Drive) uploadFile(args UploadArgs) (*drive.File, int64, error) {
- srcFile, srcFileInfo, err := openFile(args.Path)
- if err != nil {
- return nil, 0, err
- }
+ srcFile, srcFileInfo, err := openFile(args.Path)
+ if err != nil {
+ return nil, 0, err
+ }
- // Close file on function exit
- defer srcFile.Close()
+ // Close file on function exit
+ defer srcFile.Close()
- // Instantiate empty drive file
- dstFile := &drive.File{}
+ // Instantiate empty drive file
+ dstFile := &drive.File{}
- // Use provided file name or use filename
- if args.Name == "" {
- dstFile.Name = filepath.Base(srcFileInfo.Name())
- } else {
- dstFile.Name = args.Name
- }
+ // Use provided name or fall back to the source filename
+ if args.Name == "" {
+ dstFile.Name = filepath.Base(srcFileInfo.Name())
+ } else {
+ dstFile.Name = args.Name
+ }
- // Set provided mime type or get type based on file extension
- if args.Mime == "" {
- dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name))
- } else {
- dstFile.MimeType = args.Mime
- }
+ // Set provided mime type or get type based on file extension
+ if args.Mime == "" {
+ dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name))
+ } else {
+ dstFile.MimeType = args.Mime
+ }
- // Set parent folders
- dstFile.Parents = args.Parents
+ // Set parent folders
+ dstFile.Parents = args.Parents
- // Chunk size option
- chunkSize := googleapi.ChunkSize(int(args.ChunkSize))
+ // Chunk size option
+ chunkSize := googleapi.ChunkSize(int(args.ChunkSize))
- // Wrap file in progress reader
- progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size())
+ // Wrap file in progress reader
+ progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size())
- // Wrap reader in timeout reader
- reader, ctx := getTimeoutReaderContext(progressReader)
+ // Wrap reader in timeout reader
+ reader, ctx := getTimeoutReaderContext(progressReader)
- fmt.Fprintf(args.Out, "Uploading %s\n", args.Path)
- started := time.Now()
+ fmt.Fprintf(args.Out, "Uploading %s\n", args.Path)
+ started := time.Now()
- f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum", "webContentLink").Context(ctx).Media(reader, chunkSize).Do()
- if err != nil {
- return nil, 0, fmt.Errorf("Failed to upload file: %s", err)
- }
+ f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum", "webContentLink").Context(ctx).Media(reader, chunkSize).Do()
+ if err != nil {
+ return nil, 0, fmt.Errorf("Failed to upload file: %s", err)
+ }
- // Calculate average upload rate
- rate := calcRate(f.Size, started, time.Now())
+ // Calculate average upload rate
+ rate := calcRate(f.Size, started, time.Now())
- return f, rate, nil
+ return f, rate, nil
}
type UploadStreamArgs struct {
- Out io.Writer
- In io.Reader
- Name string
- Parents []string
- Mime string
- Share bool
- ChunkSize int64
- Progress io.Writer
+ Out io.Writer
+ In io.Reader
+ Name string
+ Parents []string
+ Mime string
+ Share bool
+ ChunkSize int64
+ Progress io.Writer
}
func (self *Drive) UploadStream(args UploadStreamArgs) error {
- if args.ChunkSize > intMax() - 1 {
- return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1)
- }
+ if args.ChunkSize > intMax()-1 {
+ return fmt.Errorf("Chunk size is too big, max chunk size for this computer is %d", intMax()-1)
+ }
- // Instantiate empty drive file
- dstFile := &drive.File{Name: args.Name}
+ // Instantiate empty drive file
+ dstFile := &drive.File{Name: args.Name}
- // Set mime type if provided
- if args.Mime != "" {
- dstFile.MimeType = args.Mime
- }
+ // Set mime type if provided
+ if args.Mime != "" {
+ dstFile.MimeType = args.Mime
+ }
- // Set parent folders
- dstFile.Parents = args.Parents
+ // Set parent folders
+ dstFile.Parents = args.Parents
- // Chunk size option
- chunkSize := googleapi.ChunkSize(int(args.ChunkSize))
+ // Chunk size option
+ chunkSize := googleapi.ChunkSize(int(args.ChunkSize))
- // Wrap file in progress reader
- progressReader := getProgressReader(args.In, args.Progress, 0)
+ // Wrap file in progress reader
+ progressReader := getProgressReader(args.In, args.Progress, 0)
- // Wrap reader in timeout reader
- reader, ctx := getTimeoutReaderContext(progressReader)
+ // Wrap reader in timeout reader
+ reader, ctx := getTimeoutReaderContext(progressReader)
- fmt.Fprintf(args.Out, "Uploading %s\n", dstFile.Name)
- started := time.Now()
+ fmt.Fprintf(args.Out, "Uploading %s\n", dstFile.Name)
+ started := time.Now()
- f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "webContentLink").Context(ctx).Media(reader, chunkSize).Do()
- if err != nil {
- return fmt.Errorf("Failed to upload file: %s", err)
- }
+ f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "webContentLink").Context(ctx).Media(reader, chunkSize).Do()
+ if err != nil {
+ return fmt.Errorf("Failed to upload file: %s", err)
+ }
- // Calculate average upload rate
- rate := calcRate(f.Size, started, time.Now())
+ // Calculate average upload rate
+ rate := calcRate(f.Size, started, time.Now())
- fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false))
- if args.Share {
- err = self.shareAnyoneReader(f.Id)
- if err != nil {
- return err
- }
+ fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false))
+ if args.Share {
+ err = self.shareAnyoneReader(f.Id)
+ if err != nil {
+ return err
+ }
- fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink)
- }
- return nil
+ fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink)
+ }
+ return nil
}
diff --git a/drive/util.go b/drive/util.go
index 8891e12..181b9b9 100644
--- a/drive/util.go
+++ b/drive/util.go
@@ -1,169 +1,169 @@
package drive
import (
- "os"
- "fmt"
- "path/filepath"
- "strings"
- "strconv"
- "unicode/utf8"
- "math"
- "time"
+ "fmt"
+ "math"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
)
type kv struct {
- key string
- value string
+ key string
+ value string
}
func formatList(a []string) string {
- return strings.Join(a, ", ")
+ return strings.Join(a, ", ")
}
func formatSize(bytes int64, forceBytes bool) string {
- if bytes == 0 {
- return ""
- }
+ if bytes == 0 {
+ return ""
+ }
- if forceBytes {
- return fmt.Sprintf("%v B", bytes)
- }
+ if forceBytes {
+ return fmt.Sprintf("%v B", bytes)
+ }
- units := []string{"B", "KB", "MB", "GB", "TB", "PB"}
+ units := []string{"B", "KB", "MB", "GB", "TB", "PB"}
- var i int
- value := float64(bytes)
+ var i int
+ value := float64(bytes)
- for value > 1000 {
- value /= 1000
- i++
- }
- return fmt.Sprintf("%.1f %s", value, units[i])
+ for value > 1000 {
+ value /= 1000
+ i++
+ }
+ return fmt.Sprintf("%.1f %s", value, units[i])
}
func calcRate(bytes int64, start, end time.Time) int64 {
- seconds := float64(end.Sub(start).Seconds())
- if seconds < 1.0 {
- return bytes
- }
- return round(float64(bytes) / seconds)
+ seconds := float64(end.Sub(start).Seconds())
+ if seconds < 1.0 {
+ return bytes
+ }
+ return round(float64(bytes) / seconds)
}
func round(n float64) int64 {
- if n < 0 {
- return int64(math.Ceil(n - 0.5))
- }
- return int64(math.Floor(n + 0.5))
+ if n < 0 {
+ return int64(math.Ceil(n - 0.5))
+ }
+ return int64(math.Floor(n + 0.5))
}
func formatBool(b bool) string {
- return strings.Title(strconv.FormatBool(b))
+ return strings.Title(strconv.FormatBool(b))
}
func formatDatetime(iso string) string {
- t, err := time.Parse(time.RFC3339, iso)
- if err != nil {
- return iso
- }
- local := t.Local()
- year, month, day := local.Date()
- hour, min, sec := local.Clock()
- return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec)
+ t, err := time.Parse(time.RFC3339, iso)
+ if err != nil {
+ return iso
+ }
+ local := t.Local()
+ year, month, day := local.Date()
+ hour, min, sec := local.Clock()
+ return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec)
}
// Truncates string to given max length, and inserts ellipsis into
// the middle of the string to signify that the string has been truncated
func truncateString(str string, maxRunes int) string {
- indicator := "..."
+ indicator := "..."
- // Number of runes in string
- runeCount := utf8.RuneCountInString(str)
+ // Number of runes in string
+ runeCount := utf8.RuneCountInString(str)
- // Return input string if length of input string is less than max length
- // Input string is also returned if max length is less than 9 which is the minmal supported length
- if runeCount <= maxRunes || maxRunes < 9 {
- return str
- }
+ // Return input string if length of input string is less than max length
+ // Input string is also returned if max length is less than 9, which is the minimal supported length
+ if runeCount <= maxRunes || maxRunes < 9 {
+ return str
+ }
- // Number of remaining runes to be removed
- remaining := (runeCount - maxRunes) + utf8.RuneCountInString(indicator)
+ // Number of remaining runes to be removed
+ remaining := (runeCount - maxRunes) + utf8.RuneCountInString(indicator)
- var truncated string
- var skip bool
+ var truncated string
+ var skip bool
- for leftOffset, char := range str {
- rightOffset := runeCount - (leftOffset + remaining)
+ for leftOffset, char := range str {
+ rightOffset := runeCount - (leftOffset + remaining)
- // Start skipping chars when the left and right offsets are equal
- // Or in the case where we wont be able to do an even split: when the left offset is larger than the right offset
- if leftOffset == rightOffset || (leftOffset > rightOffset && !skip) {
- skip = true
- truncated += indicator
- }
+ // Start skipping chars when the left and right offsets are equal
+ // Or in the case where we won't be able to do an even split: when the left offset is larger than the right offset
+ if leftOffset == rightOffset || (leftOffset > rightOffset && !skip) {
+ skip = true
+ truncated += indicator
+ }
- if skip && remaining > 0 {
- // Skip char and decrement the remaining skip counter
- remaining--
- continue
- }
+ if skip && remaining > 0 {
+ // Skip char and decrement the remaining skip counter
+ remaining--
+ continue
+ }
- // Add char to result string
- truncated += string(char)
- }
+ // Add char to result string
+ truncated += string(char)
+ }
- // Return truncated string
- return truncated
+ // Return truncated string
+ return truncated
}
func fileExists(path string) bool {
- _, err := os.Stat(path)
- if err == nil {
- return true
- }
- return false
+ _, err := os.Stat(path)
+ if err == nil {
+ return true
+ }
+ return false
}
func mkdir(path string) error {
- dir := filepath.Dir(path)
- if fileExists(dir) {
- return nil
- }
- return os.MkdirAll(dir, 0775)
+ dir := filepath.Dir(path)
+ if fileExists(dir) {
+ return nil
+ }
+ return os.MkdirAll(dir, 0775)
}
func intMax() int64 {
- return 1 << (strconv.IntSize - 1) - 1
+ return 1<<(strconv.IntSize-1) - 1
}
func pathLength(path string) int {
- return strings.Count(path, string(os.PathSeparator))
+ return strings.Count(path, string(os.PathSeparator))
}
func parentFilePath(path string) string {
- dir, _ := filepath.Split(path)
- return filepath.Dir(dir)
+ dir, _ := filepath.Split(path)
+ return filepath.Dir(dir)
}
func pow(x int, y int) int {
- f := math.Pow(float64(x), float64(y))
- return int(f)
+ f := math.Pow(float64(x), float64(y))
+ return int(f)
}
func min(x int, y int) int {
- n := math.Min(float64(x), float64(y))
- return int(n)
+ n := math.Min(float64(x), float64(y))
+ return int(n)
}
func openFile(path string) (*os.File, os.FileInfo, error) {
- f, err := os.Open(path)
- if err != nil {
- return nil, nil, fmt.Errorf("Failed to open file: %s", err)
- }
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Failed to open file: %s", err)
+ }
- info, err := f.Stat()
- if err != nil {
- return nil, nil, fmt.Errorf("Failed getting file metadata: %s", err)
- }
+ info, err := f.Stat()
+ if err != nil {
+ return nil, nil, fmt.Errorf("Failed getting file metadata: %s", err)
+ }
- return f, info, nil
+ return f, info, nil
}
diff --git a/gdrive.go b/gdrive.go
index d9181c6..94e000c 100644
--- a/gdrive.go
+++ b/gdrive.go
@@ -1,9 +1,9 @@
package main
import (
+ "./cli"
"fmt"
"os"
- "./cli"
)
const Name = "gdrive"
@@ -17,752 +17,752 @@ const DefaultUploadChunkSize = 8 * 1024 * 1024
const DefaultQuery = "trashed = false and 'me' in owners"
const DefaultShareRole = "reader"
const DefaultShareType = "anyone"
-var DefaultConfigDir = GetDefaultConfigDir()
+var DefaultConfigDir = GetDefaultConfigDir()
func main() {
- globalFlags := []cli.Flag{
- cli.StringFlag{
- Name: "configDir",
- Patterns: []string{"-c", "--config"},
- Description: fmt.Sprintf("Application path, default: %s", DefaultConfigDir),
- DefaultValue: DefaultConfigDir,
- },
- cli.StringFlag{
- Name: "refreshToken",
- Patterns: []string{"--refresh-token"},
- Description: "Oauth refresh token used to get access token (for advanced users)",
- },
- cli.StringFlag{
- Name: "accessToken",
- Patterns: []string{"--access-token"},
- Description: "Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users)",
- },
- }
+ globalFlags := []cli.Flag{
+ cli.StringFlag{
+ Name: "configDir",
+ Patterns: []string{"-c", "--config"},
+ Description: fmt.Sprintf("Application path, default: %s", DefaultConfigDir),
+ DefaultValue: DefaultConfigDir,
+ },
+ cli.StringFlag{
+ Name: "refreshToken",
+ Patterns: []string{"--refresh-token"},
+ Description: "OAuth refresh token used to get access token (for advanced users)",
+ },
+ cli.StringFlag{
+ Name: "accessToken",
+ Patterns: []string{"--access-token"},
+ Description: "OAuth access token, only recommended for short-lived requests because of its short lifetime (for advanced users)",
+ },
+ }
- handlers := []*cli.Handler{
- &cli.Handler{
- Pattern: "[global] list [options]",
- Description: "List files",
- Callback: listHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.IntFlag{
- Name: "maxFiles",
- Patterns: []string{"-m", "--max"},
- Description: fmt.Sprintf("Max files to list, default: %d", DefaultMaxFiles),
- DefaultValue: DefaultMaxFiles,
- },
- cli.StringFlag{
- Name: "query",
- Patterns: []string{"-q", "--query"},
- Description: fmt.Sprintf(`Default query: "%s". See https://developers.google.com/drive/search-parameters`, DefaultQuery),
- DefaultValue: DefaultQuery,
- },
- cli.StringFlag{
- Name: "sortOrder",
- Patterns: []string{"--order"},
- Description: "Sort order. See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy",
- },
- cli.IntFlag{
- Name: "nameWidth",
- Patterns: []string{"--name-width"},
- Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth),
- DefaultValue: DefaultNameWidth,
- },
- cli.BoolFlag{
- Name: "absPath",
- Patterns: []string{"--absolute"},
- Description: "Show absolute path to file (will only show path from first parent)",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "skipHeader",
- Patterns: []string{"--no-header"},
- Description: "Dont print the header",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "sizeInBytes",
- Patterns: []string{"--bytes"},
- Description: "Size in bytes",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] download [options] <fileId>",
- Description: "Download file or directory",
- Callback: downloadHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.BoolFlag{
- Name: "force",
- Patterns: []string{"-f", "--force"},
- Description: "Overwrite existing file",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "recursive",
- Patterns: []string{"-r", "--recursive"},
- Description: "Download directory recursively, documents will be skipped",
- OmitValue: true,
- },
- cli.StringFlag{
- Name: "path",
- Patterns: []string{"--path"},
- Description: "Download path",
- },
- cli.BoolFlag{
- Name: "delete",
- Patterns: []string{"--delete"},
- Description: "Delete remote file when download is successful",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "noProgress",
- Patterns: []string{"--no-progress"},
- Description: "Hide progress",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "stdout",
- Patterns: []string{"--stdout"},
- Description: "Write file content to stdout",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] download query [options] <query>",
- Description: "Download all files and directories matching query",
- Callback: downloadQueryHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.BoolFlag{
- Name: "force",
- Patterns: []string{"-f", "--force"},
- Description: "Overwrite existing file",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "recursive",
- Patterns: []string{"-r", "--recursive"},
- Description: "Download directories recursively, documents will be skipped",
- OmitValue: true,
- },
- cli.StringFlag{
- Name: "path",
- Patterns: []string{"--path"},
- Description: "Download path",
- },
- cli.BoolFlag{
- Name: "noProgress",
- Patterns: []string{"--no-progress"},
- Description: "Hide progress",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] upload [options] <path>",
- Description: "Upload file or directory",
- Callback: uploadHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.BoolFlag{
- Name: "recursive",
- Patterns: []string{"-r", "--recursive"},
- Description: "Upload directory recursively",
- OmitValue: true,
- },
- cli.StringSliceFlag{
- Name: "parent",
- Patterns: []string{"-p", "--parent"},
- Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents",
- },
- cli.StringFlag{
- Name: "name",
- Patterns: []string{"--name"},
- Description: "Filename",
- },
- cli.BoolFlag{
- Name: "noProgress",
- Patterns: []string{"--no-progress"},
- Description: "Hide progress",
- OmitValue: true,
- },
- cli.StringFlag{
- Name: "mime",
- Patterns: []string{"--mime"},
- Description: "Force mime type",
- },
- cli.BoolFlag{
- Name: "share",
- Patterns: []string{"--share"},
- Description: "Share file",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "delete",
- Patterns: []string{"--delete"},
- Description: "Delete local file when upload is successful",
- OmitValue: true,
- },
- cli.IntFlag{
- Name: "chunksize",
- Patterns: []string{"--chunksize"},
- Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize),
- DefaultValue: DefaultUploadChunkSize,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] upload - [options] <name>",
- Description: "Upload file from stdin",
- Callback: uploadStdinHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.StringSliceFlag{
- Name: "parent",
- Patterns: []string{"-p", "--parent"},
- Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents",
- },
- cli.IntFlag{
- Name: "chunksize",
- Patterns: []string{"--chunksize"},
- Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize),
- DefaultValue: DefaultUploadChunkSize,
- },
- cli.StringFlag{
- Name: "mime",
- Patterns: []string{"--mime"},
- Description: "Force mime type",
- },
- cli.BoolFlag{
- Name: "share",
- Patterns: []string{"--share"},
- Description: "Share file",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "noProgress",
- Patterns: []string{"--no-progress"},
- Description: "Hide progress",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] update [options] <fileId> <path>",
- Description: "Update file, this creates a new revision of the file",
- Callback: updateHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.StringSliceFlag{
- Name: "parent",
- Patterns: []string{"-p", "--parent"},
- Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents",
- },
- cli.StringFlag{
- Name: "name",
- Patterns: []string{"--name"},
- Description: "Filename",
- },
- cli.BoolFlag{
- Name: "noProgress",
- Patterns: []string{"--no-progress"},
- Description: "Hide progress",
- OmitValue: true,
- },
- cli.StringFlag{
- Name: "mime",
- Patterns: []string{"--mime"},
- Description: "Force mime type",
- },
- cli.IntFlag{
- Name: "chunksize",
- Patterns: []string{"--chunksize"},
- Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize),
- DefaultValue: DefaultUploadChunkSize,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] info [options] <fileId>",
- Description: "Show file info",
- Callback: infoHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.BoolFlag{
- Name: "sizeInBytes",
- Patterns: []string{"--bytes"},
- Description: "Show size in bytes",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] mkdir [options] <name>",
- Description: "Create directory",
- Callback: mkdirHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.StringSliceFlag{
- Name: "parent",
- Patterns: []string{"-p", "--parent"},
- Description: "Parent id of created directory, can be specified multiple times to give many parents",
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] share [options] <fileId>",
- Description: "Share file or directory",
- Callback: shareHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.StringFlag{
- Name: "role",
- Patterns: []string{"--role"},
- Description: fmt.Sprintf("Share role: owner/writer/commenter/reader, default: %s", DefaultShareRole),
- DefaultValue: DefaultShareRole,
- },
- cli.StringFlag{
- Name: "type",
- Patterns: []string{"--type"},
- Description: fmt.Sprintf("Share type: user/group/domain/anyone, default: %s", DefaultShareType),
- DefaultValue: DefaultShareType,
- },
- cli.StringFlag{
- Name: "email",
- Patterns: []string{"--email"},
- Description: "The email address of the user or group to share the file with. Requires 'user' or 'group' as type",
- },
- cli.BoolFlag{
- Name: "discoverable",
- Patterns: []string{"--discoverable"},
- Description: "Make file discoverable by search engines",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "revoke",
- Patterns: []string{"--revoke"},
- Description: "Delete all sharing permissions (owner roles will be skipped)",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] share list <fileId>",
- Description: "List files permissions",
- Callback: shareListHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- },
- },
- &cli.Handler{
- Pattern: "[global] share revoke <fileId> <permissionId>",
- Description: "Revoke permission",
- Callback: shareRevokeHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- },
- },
- &cli.Handler{
- Pattern: "[global] delete [options] <fileId>",
- Description: "Delete file or directory",
- Callback: deleteHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.BoolFlag{
- Name: "recursive",
- Patterns: []string{"-r", "--recursive"},
- Description: "Delete directory and all it's content",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] sync list [options]",
- Description: "List all syncable directories on drive",
- Callback: listSyncHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.BoolFlag{
- Name: "skipHeader",
- Patterns: []string{"--no-header"},
- Description: "Dont print the header",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] sync list content [options] <fileId>",
- Description: "List content of syncable directory",
- Callback: listRecursiveSyncHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.StringFlag{
- Name: "sortOrder",
- Patterns: []string{"--order"},
- Description: "Sort order. See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy",
- },
- cli.IntFlag{
- Name: "pathWidth",
- Patterns: []string{"--path-width"},
- Description: fmt.Sprintf("Width of path column, default: %d, minimum: 9, use 0 for full width", DefaultPathWidth),
- DefaultValue: DefaultPathWidth,
- },
- cli.BoolFlag{
- Name: "skipHeader",
- Patterns: []string{"--no-header"},
- Description: "Dont print the header",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "sizeInBytes",
- Patterns: []string{"--bytes"},
- Description: "Size in bytes",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] sync download [options] <fileId> <path>",
- Description: "Sync drive directory to local directory",
- Callback: downloadSyncHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.BoolFlag{
- Name: "keepRemote",
- Patterns: []string{"--keep-remote"},
- Description: "Keep remote file when a conflict is encountered",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "keepLocal",
- Patterns: []string{"--keep-local"},
- Description: "Keep local file when a conflict is encountered",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "keepLargest",
- Patterns: []string{"--keep-largest"},
- Description: "Keep largest file when a conflict is encountered",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "deleteExtraneous",
- Patterns: []string{"--delete-extraneous"},
- Description: "Delete extraneous local files",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "dryRun",
- Patterns: []string{"--dry-run"},
- Description: "Show what would have been transferred",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "noProgress",
- Patterns: []string{"--no-progress"},
- Description: "Hide progress",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] sync upload [options] <path> <fileId>",
- Description: "Sync local directory to drive",
- Callback: uploadSyncHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.BoolFlag{
- Name: "keepRemote",
- Patterns: []string{"--keep-remote"},
- Description: "Keep remote file when a conflict is encountered",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "keepLocal",
- Patterns: []string{"--keep-local"},
- Description: "Keep local file when a conflict is encountered",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "keepLargest",
- Patterns: []string{"--keep-largest"},
- Description: "Keep largest file when a conflict is encountered",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "deleteExtraneous",
- Patterns: []string{"--delete-extraneous"},
- Description: "Delete extraneous remote files",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "dryRun",
- Patterns: []string{"--dry-run"},
- Description: "Show what would have been transferred",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "noProgress",
- Patterns: []string{"--no-progress"},
- Description: "Hide progress",
- OmitValue: true,
- },
- cli.IntFlag{
- Name: "chunksize",
- Patterns: []string{"--chunksize"},
- Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize),
- DefaultValue: DefaultUploadChunkSize,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] changes [options]",
- Description: "List file changes",
- Callback: listChangesHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.IntFlag{
- Name: "maxChanges",
- Patterns: []string{"-m", "--max"},
- Description: fmt.Sprintf("Max changes to list, default: %d", DefaultMaxChanges),
- DefaultValue: DefaultMaxChanges,
- },
- cli.StringFlag{
- Name: "pageToken",
- Patterns: []string{"--since"},
- Description: fmt.Sprintf("Page token to start listing changes from"),
- DefaultValue: "1",
- },
- cli.BoolFlag{
- Name: "now",
- Patterns: []string{"--now"},
- Description: fmt.Sprintf("Get latest page token"),
- OmitValue: true,
- },
- cli.IntFlag{
- Name: "nameWidth",
- Patterns: []string{"--name-width"},
- Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth),
- DefaultValue: DefaultNameWidth,
- },
- cli.BoolFlag{
- Name: "skipHeader",
- Patterns: []string{"--no-header"},
- Description: "Dont print the header",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] revision list [options] <fileId>",
- Description: "List file revisions",
- Callback: listRevisionsHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.IntFlag{
- Name: "nameWidth",
- Patterns: []string{"--name-width"},
- Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth),
- DefaultValue: DefaultNameWidth,
- },
- cli.BoolFlag{
- Name: "skipHeader",
- Patterns: []string{"--no-header"},
- Description: "Dont print the header",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "sizeInBytes",
- Patterns: []string{"--bytes"},
- Description: "Size in bytes",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] revision download [options] <fileId> <revisionId>",
- Description: "Download revision",
- Callback: downloadRevisionHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.BoolFlag{
- Name: "force",
- Patterns: []string{"-f", "--force"},
- Description: "Overwrite existing file",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "noProgress",
- Patterns: []string{"--no-progress"},
- Description: "Hide progress",
- OmitValue: true,
- },
- cli.BoolFlag{
- Name: "stdout",
- Patterns: []string{"--stdout"},
- Description: "Write file content to stdout",
- OmitValue: true,
- },
- cli.StringFlag{
- Name: "path",
- Patterns: []string{"--path"},
- Description: "Download path",
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] revision delete <fileId> <revisionId>",
- Description: "Delete file revision",
- Callback: deleteRevisionHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- },
- },
- &cli.Handler{
- Pattern: "[global] import [options] <path>",
- Description: "Upload and convert file to a google document, see 'about import' for available conversions",
- Callback: importHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.StringSliceFlag{
- Name: "parent",
- Patterns: []string{"-p", "--parent"},
- Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents",
- },
- cli.BoolFlag{
- Name: "noProgress",
- Patterns: []string{"--no-progress"},
- Description: "Hide progress",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] export [options] <fileId>",
- Description: "Export a google document",
- Callback: exportHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.BoolFlag{
- Name: "force",
- Patterns: []string{"-f", "--force"},
- Description: "Overwrite existing file",
- OmitValue: true,
- },
- cli.StringFlag{
- Name: "mime",
- Patterns: []string{"--mime"},
- Description: "Mime type of exported file",
- },
- cli.BoolFlag{
- Name: "printMimes",
- Patterns: []string{"--print-mimes"},
- Description: "Print available mime types for given file",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] about [options]",
- Description: "Google drive metadata, quota usage",
- Callback: aboutHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- cli.NewFlagGroup("options",
- cli.BoolFlag{
- Name: "sizeInBytes",
- Patterns: []string{"--bytes"},
- Description: "Show size in bytes",
- OmitValue: true,
- },
- ),
- },
- },
- &cli.Handler{
- Pattern: "[global] about import",
- Description: "Show supported import formats",
- Callback: aboutImportHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- },
- },
- &cli.Handler{
- Pattern: "[global] about export",
- Description: "Show supported export formats",
- Callback: aboutExportHandler,
- FlagGroups: cli.FlagGroups{
- cli.NewFlagGroup("global", globalFlags...),
- },
- },
- &cli.Handler{
- Pattern: "version",
- Description: "Print application version",
- Callback: printVersion,
- },
- &cli.Handler{
- Pattern: "help",
- Description: "Print help",
- Callback: printHelp,
- },
- &cli.Handler{
- Pattern: "help <command>",
- Description: "Print command help",
- Callback: printCommandHelp,
- },
- &cli.Handler{
- Pattern: "help <command> <subcommand>",
- Description: "Print subcommand help",
- Callback: printSubCommandHelp,
- },
- }
+ handlers := []*cli.Handler{
+ &cli.Handler{
+ Pattern: "[global] list [options]",
+ Description: "List files",
+ Callback: listHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.IntFlag{
+ Name: "maxFiles",
+ Patterns: []string{"-m", "--max"},
+ Description: fmt.Sprintf("Max files to list, default: %d", DefaultMaxFiles),
+ DefaultValue: DefaultMaxFiles,
+ },
+ cli.StringFlag{
+ Name: "query",
+ Patterns: []string{"-q", "--query"},
+ Description: fmt.Sprintf(`Default query: "%s". See https://developers.google.com/drive/search-parameters`, DefaultQuery),
+ DefaultValue: DefaultQuery,
+ },
+ cli.StringFlag{
+ Name: "sortOrder",
+ Patterns: []string{"--order"},
+ Description: "Sort order. See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy",
+ },
+ cli.IntFlag{
+ Name: "nameWidth",
+ Patterns: []string{"--name-width"},
+ Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth),
+ DefaultValue: DefaultNameWidth,
+ },
+ cli.BoolFlag{
+ Name: "absPath",
+ Patterns: []string{"--absolute"},
+ Description: "Show absolute path to file (will only show path from first parent)",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "skipHeader",
+ Patterns: []string{"--no-header"},
+ Description: "Don't print the header",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "sizeInBytes",
+ Patterns: []string{"--bytes"},
+ Description: "Size in bytes",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] download [options] <fileId>",
+ Description: "Download file or directory",
+ Callback: downloadHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.BoolFlag{
+ Name: "force",
+ Patterns: []string{"-f", "--force"},
+ Description: "Overwrite existing file",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "recursive",
+ Patterns: []string{"-r", "--recursive"},
+ Description: "Download directory recursively, documents will be skipped",
+ OmitValue: true,
+ },
+ cli.StringFlag{
+ Name: "path",
+ Patterns: []string{"--path"},
+ Description: "Download path",
+ },
+ cli.BoolFlag{
+ Name: "delete",
+ Patterns: []string{"--delete"},
+ Description: "Delete remote file when download is successful",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "noProgress",
+ Patterns: []string{"--no-progress"},
+ Description: "Hide progress",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "stdout",
+ Patterns: []string{"--stdout"},
+ Description: "Write file content to stdout",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] download query [options] <query>",
+ Description: "Download all files and directories matching query",
+ Callback: downloadQueryHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.BoolFlag{
+ Name: "force",
+ Patterns: []string{"-f", "--force"},
+ Description: "Overwrite existing file",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "recursive",
+ Patterns: []string{"-r", "--recursive"},
+ Description: "Download directories recursively, documents will be skipped",
+ OmitValue: true,
+ },
+ cli.StringFlag{
+ Name: "path",
+ Patterns: []string{"--path"},
+ Description: "Download path",
+ },
+ cli.BoolFlag{
+ Name: "noProgress",
+ Patterns: []string{"--no-progress"},
+ Description: "Hide progress",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] upload [options] <path>",
+ Description: "Upload file or directory",
+ Callback: uploadHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.BoolFlag{
+ Name: "recursive",
+ Patterns: []string{"-r", "--recursive"},
+ Description: "Upload directory recursively",
+ OmitValue: true,
+ },
+ cli.StringSliceFlag{
+ Name: "parent",
+ Patterns: []string{"-p", "--parent"},
+ Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents",
+ },
+ cli.StringFlag{
+ Name: "name",
+ Patterns: []string{"--name"},
+ Description: "Filename",
+ },
+ cli.BoolFlag{
+ Name: "noProgress",
+ Patterns: []string{"--no-progress"},
+ Description: "Hide progress",
+ OmitValue: true,
+ },
+ cli.StringFlag{
+ Name: "mime",
+ Patterns: []string{"--mime"},
+ Description: "Force mime type",
+ },
+ cli.BoolFlag{
+ Name: "share",
+ Patterns: []string{"--share"},
+ Description: "Share file",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "delete",
+ Patterns: []string{"--delete"},
+ Description: "Delete local file when upload is successful",
+ OmitValue: true,
+ },
+ cli.IntFlag{
+ Name: "chunksize",
+ Patterns: []string{"--chunksize"},
+ Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize),
+ DefaultValue: DefaultUploadChunkSize,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] upload - [options] <name>",
+ Description: "Upload file from stdin",
+ Callback: uploadStdinHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.StringSliceFlag{
+ Name: "parent",
+ Patterns: []string{"-p", "--parent"},
+ Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents",
+ },
+ cli.IntFlag{
+ Name: "chunksize",
+ Patterns: []string{"--chunksize"},
+ Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize),
+ DefaultValue: DefaultUploadChunkSize,
+ },
+ cli.StringFlag{
+ Name: "mime",
+ Patterns: []string{"--mime"},
+ Description: "Force mime type",
+ },
+ cli.BoolFlag{
+ Name: "share",
+ Patterns: []string{"--share"},
+ Description: "Share file",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "noProgress",
+ Patterns: []string{"--no-progress"},
+ Description: "Hide progress",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] update [options] <fileId> <path>",
+ Description: "Update file; this creates a new revision of the file",
+ Callback: updateHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.StringSliceFlag{
+ Name: "parent",
+ Patterns: []string{"-p", "--parent"},
+ Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents",
+ },
+ cli.StringFlag{
+ Name: "name",
+ Patterns: []string{"--name"},
+ Description: "Filename",
+ },
+ cli.BoolFlag{
+ Name: "noProgress",
+ Patterns: []string{"--no-progress"},
+ Description: "Hide progress",
+ OmitValue: true,
+ },
+ cli.StringFlag{
+ Name: "mime",
+ Patterns: []string{"--mime"},
+ Description: "Force mime type",
+ },
+ cli.IntFlag{
+ Name: "chunksize",
+ Patterns: []string{"--chunksize"},
+ Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize),
+ DefaultValue: DefaultUploadChunkSize,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] info [options] <fileId>",
+ Description: "Show file info",
+ Callback: infoHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.BoolFlag{
+ Name: "sizeInBytes",
+ Patterns: []string{"--bytes"},
+ Description: "Show size in bytes",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] mkdir [options] <name>",
+ Description: "Create directory",
+ Callback: mkdirHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.StringSliceFlag{
+ Name: "parent",
+ Patterns: []string{"-p", "--parent"},
+ Description: "Parent id of created directory, can be specified multiple times to give many parents",
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] share [options] <fileId>",
+ Description: "Share file or directory",
+ Callback: shareHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.StringFlag{
+ Name: "role",
+ Patterns: []string{"--role"},
+ Description: fmt.Sprintf("Share role: owner/writer/commenter/reader, default: %s", DefaultShareRole),
+ DefaultValue: DefaultShareRole,
+ },
+ cli.StringFlag{
+ Name: "type",
+ Patterns: []string{"--type"},
+ Description: fmt.Sprintf("Share type: user/group/domain/anyone, default: %s", DefaultShareType),
+ DefaultValue: DefaultShareType,
+ },
+ cli.StringFlag{
+ Name: "email",
+ Patterns: []string{"--email"},
+ Description: "The email address of the user or group to share the file with. Requires 'user' or 'group' as type",
+ },
+ cli.BoolFlag{
+ Name: "discoverable",
+ Patterns: []string{"--discoverable"},
+ Description: "Make file discoverable by search engines",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "revoke",
+ Patterns: []string{"--revoke"},
+ Description: "Delete all sharing permissions (owner roles will be skipped)",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] share list <fileId>",
+ Description: "List file permissions",
+ Callback: shareListHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] share revoke <fileId> <permissionId>",
+ Description: "Revoke permission",
+ Callback: shareRevokeHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] delete [options] <fileId>",
+ Description: "Delete file or directory",
+ Callback: deleteHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.BoolFlag{
+ Name: "recursive",
+ Patterns: []string{"-r", "--recursive"},
+ Description: "Delete directory and all its content",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] sync list [options]",
+ Description: "List all syncable directories on drive",
+ Callback: listSyncHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.BoolFlag{
+ Name: "skipHeader",
+ Patterns: []string{"--no-header"},
+ Description: "Don't print the header",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] sync list content [options] <fileId>",
+ Description: "List content of syncable directory",
+ Callback: listRecursiveSyncHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.StringFlag{
+ Name: "sortOrder",
+ Patterns: []string{"--order"},
+ Description: "Sort order. See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy",
+ },
+ cli.IntFlag{
+ Name: "pathWidth",
+ Patterns: []string{"--path-width"},
+ Description: fmt.Sprintf("Width of path column, default: %d, minimum: 9, use 0 for full width", DefaultPathWidth),
+ DefaultValue: DefaultPathWidth,
+ },
+ cli.BoolFlag{
+ Name: "skipHeader",
+ Patterns: []string{"--no-header"},
+ Description: "Don't print the header",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "sizeInBytes",
+ Patterns: []string{"--bytes"},
+ Description: "Size in bytes",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] sync download [options] <fileId> <path>",
+ Description: "Sync drive directory to local directory",
+ Callback: downloadSyncHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.BoolFlag{
+ Name: "keepRemote",
+ Patterns: []string{"--keep-remote"},
+ Description: "Keep remote file when a conflict is encountered",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "keepLocal",
+ Patterns: []string{"--keep-local"},
+ Description: "Keep local file when a conflict is encountered",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "keepLargest",
+ Patterns: []string{"--keep-largest"},
+ Description: "Keep largest file when a conflict is encountered",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "deleteExtraneous",
+ Patterns: []string{"--delete-extraneous"},
+ Description: "Delete extraneous local files",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "dryRun",
+ Patterns: []string{"--dry-run"},
+ Description: "Show what would have been transferred",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "noProgress",
+ Patterns: []string{"--no-progress"},
+ Description: "Hide progress",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] sync upload [options] <path> <fileId>",
+ Description: "Sync local directory to drive",
+ Callback: uploadSyncHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.BoolFlag{
+ Name: "keepRemote",
+ Patterns: []string{"--keep-remote"},
+ Description: "Keep remote file when a conflict is encountered",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "keepLocal",
+ Patterns: []string{"--keep-local"},
+ Description: "Keep local file when a conflict is encountered",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "keepLargest",
+ Patterns: []string{"--keep-largest"},
+ Description: "Keep largest file when a conflict is encountered",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "deleteExtraneous",
+ Patterns: []string{"--delete-extraneous"},
+ Description: "Delete extraneous remote files",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "dryRun",
+ Patterns: []string{"--dry-run"},
+ Description: "Show what would have been transferred",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "noProgress",
+ Patterns: []string{"--no-progress"},
+ Description: "Hide progress",
+ OmitValue: true,
+ },
+ cli.IntFlag{
+ Name: "chunksize",
+ Patterns: []string{"--chunksize"},
+ Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize),
+ DefaultValue: DefaultUploadChunkSize,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] changes [options]",
+ Description: "List file changes",
+ Callback: listChangesHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.IntFlag{
+ Name: "maxChanges",
+ Patterns: []string{"-m", "--max"},
+ Description: fmt.Sprintf("Max changes to list, default: %d", DefaultMaxChanges),
+ DefaultValue: DefaultMaxChanges,
+ },
+ cli.StringFlag{
+ Name: "pageToken",
+ Patterns: []string{"--since"},
+ Description: "Page token to start listing changes from",
+ DefaultValue: "1",
+ },
+ cli.BoolFlag{
+ Name: "now",
+ Patterns: []string{"--now"},
+ Description: "Get latest page token",
+ OmitValue: true,
+ },
+ cli.IntFlag{
+ Name: "nameWidth",
+ Patterns: []string{"--name-width"},
+ Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth),
+ DefaultValue: DefaultNameWidth,
+ },
+ cli.BoolFlag{
+ Name: "skipHeader",
+ Patterns: []string{"--no-header"},
+ Description: "Don't print the header",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] revision list [options] <fileId>",
+ Description: "List file revisions",
+ Callback: listRevisionsHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.IntFlag{
+ Name: "nameWidth",
+ Patterns: []string{"--name-width"},
+ Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth),
+ DefaultValue: DefaultNameWidth,
+ },
+ cli.BoolFlag{
+ Name: "skipHeader",
+ Patterns: []string{"--no-header"},
+ Description: "Don't print the header",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "sizeInBytes",
+ Patterns: []string{"--bytes"},
+ Description: "Size in bytes",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] revision download [options] <fileId> <revisionId>",
+ Description: "Download revision",
+ Callback: downloadRevisionHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.BoolFlag{
+ Name: "force",
+ Patterns: []string{"-f", "--force"},
+ Description: "Overwrite existing file",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "noProgress",
+ Patterns: []string{"--no-progress"},
+ Description: "Hide progress",
+ OmitValue: true,
+ },
+ cli.BoolFlag{
+ Name: "stdout",
+ Patterns: []string{"--stdout"},
+ Description: "Write file content to stdout",
+ OmitValue: true,
+ },
+ cli.StringFlag{
+ Name: "path",
+ Patterns: []string{"--path"},
+ Description: "Download path",
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] revision delete <fileId> <revisionId>",
+ Description: "Delete file revision",
+ Callback: deleteRevisionHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] import [options] <path>",
+ Description: "Upload and convert file to a Google document, see 'about import' for available conversions",
+ Callback: importHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.StringSliceFlag{
+ Name: "parent",
+ Patterns: []string{"-p", "--parent"},
+ Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents",
+ },
+ cli.BoolFlag{
+ Name: "noProgress",
+ Patterns: []string{"--no-progress"},
+ Description: "Hide progress",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] export [options] <fileId>",
+ Description: "Export a Google document",
+ Callback: exportHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.BoolFlag{
+ Name: "force",
+ Patterns: []string{"-f", "--force"},
+ Description: "Overwrite existing file",
+ OmitValue: true,
+ },
+ cli.StringFlag{
+ Name: "mime",
+ Patterns: []string{"--mime"},
+ Description: "Mime type of exported file",
+ },
+ cli.BoolFlag{
+ Name: "printMimes",
+ Patterns: []string{"--print-mimes"},
+ Description: "Print available mime types for given file",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] about [options]",
+ Description: "Google Drive metadata, quota usage",
+ Callback: aboutHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ cli.NewFlagGroup("options",
+ cli.BoolFlag{
+ Name: "sizeInBytes",
+ Patterns: []string{"--bytes"},
+ Description: "Show size in bytes",
+ OmitValue: true,
+ },
+ ),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] about import",
+ Description: "Show supported import formats",
+ Callback: aboutImportHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ },
+ },
+ &cli.Handler{
+ Pattern: "[global] about export",
+ Description: "Show supported export formats",
+ Callback: aboutExportHandler,
+ FlagGroups: cli.FlagGroups{
+ cli.NewFlagGroup("global", globalFlags...),
+ },
+ },
+ &cli.Handler{
+ Pattern: "version",
+ Description: "Print application version",
+ Callback: printVersion,
+ },
+ &cli.Handler{
+ Pattern: "help",
+ Description: "Print help",
+ Callback: printHelp,
+ },
+ &cli.Handler{
+ Pattern: "help <command>",
+ Description: "Print command help",
+ Callback: printCommandHelp,
+ },
+ &cli.Handler{
+ Pattern: "help <command> <subcommand>",
+ Description: "Print subcommand help",
+ Callback: printSubCommandHelp,
+ },
+ }
- cli.SetHandlers(handlers)
+ cli.SetHandlers(handlers)
- if ok := cli.Handle(os.Args[1:]); !ok {
- ExitF("No valid arguments given, use '%s help' to see available commands", Name)
- }
+ if ok := cli.Handle(os.Args[1:]); !ok {
+ ExitF("No valid arguments given, use '%s help' to see available commands", Name)
+ }
}
diff --git a/handlers_drive.go b/handlers_drive.go
index 05b1ca9..7812c9b 100644
--- a/handlers_drive.go
+++ b/handlers_drive.go
@@ -1,427 +1,426 @@
package main
import (
+ "./auth"
+ "./cli"
+ "./drive"
"fmt"
- "os"
"io"
"io/ioutil"
- "path/filepath"
"net/http"
- "./cli"
- "./auth"
- "./drive"
+ "os"
+ "path/filepath"
)
-const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com"
+const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com"
const ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO"
const TokenFilename = "token_v2.json"
const DefaultCacheFileName = "file_cache.json"
-
func listHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).List(drive.ListFilesArgs{
- Out: os.Stdout,
- MaxFiles: args.Int64("maxFiles"),
- NameWidth: args.Int64("nameWidth"),
- Query: args.String("query"),
- SortOrder: args.String("sortOrder"),
- SkipHeader: args.Bool("skipHeader"),
- SizeInBytes: args.Bool("sizeInBytes"),
- AbsPath: args.Bool("absPath"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).List(drive.ListFilesArgs{
+ Out: os.Stdout,
+ MaxFiles: args.Int64("maxFiles"),
+ NameWidth: args.Int64("nameWidth"),
+ Query: args.String("query"),
+ SortOrder: args.String("sortOrder"),
+ SkipHeader: args.Bool("skipHeader"),
+ SizeInBytes: args.Bool("sizeInBytes"),
+ AbsPath: args.Bool("absPath"),
+ })
+ checkErr(err)
}
func listChangesHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).ListChanges(drive.ListChangesArgs{
- Out: os.Stdout,
- PageToken: args.String("pageToken"),
- MaxChanges: args.Int64("maxChanges"),
- Now: args.Bool("now"),
- NameWidth: args.Int64("nameWidth"),
- SkipHeader: args.Bool("skipHeader"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).ListChanges(drive.ListChangesArgs{
+ Out: os.Stdout,
+ PageToken: args.String("pageToken"),
+ MaxChanges: args.Int64("maxChanges"),
+ Now: args.Bool("now"),
+ NameWidth: args.Int64("nameWidth"),
+ SkipHeader: args.Bool("skipHeader"),
+ })
+ checkErr(err)
}
func downloadHandler(ctx cli.Context) {
- args := ctx.Args()
- checkDownloadArgs(args)
- err := newDrive(args).Download(drive.DownloadArgs{
- Out: os.Stdout,
- Id: args.String("fileId"),
- Force: args.Bool("force"),
- Path: args.String("path"),
- Delete: args.Bool("delete"),
- Recursive: args.Bool("recursive"),
- Stdout: args.Bool("stdout"),
- Progress: progressWriter(args.Bool("noProgress")),
- })
- checkErr(err)
+ args := ctx.Args()
+ checkDownloadArgs(args)
+ err := newDrive(args).Download(drive.DownloadArgs{
+ Out: os.Stdout,
+ Id: args.String("fileId"),
+ Force: args.Bool("force"),
+ Path: args.String("path"),
+ Delete: args.Bool("delete"),
+ Recursive: args.Bool("recursive"),
+ Stdout: args.Bool("stdout"),
+ Progress: progressWriter(args.Bool("noProgress")),
+ })
+ checkErr(err)
}
func downloadQueryHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).DownloadQuery(drive.DownloadQueryArgs{
- Out: os.Stdout,
- Query: args.String("query"),
- Force: args.Bool("force"),
- Recursive: args.Bool("recursive"),
- Path: args.String("path"),
- Progress: progressWriter(args.Bool("noProgress")),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).DownloadQuery(drive.DownloadQueryArgs{
+ Out: os.Stdout,
+ Query: args.String("query"),
+ Force: args.Bool("force"),
+ Recursive: args.Bool("recursive"),
+ Path: args.String("path"),
+ Progress: progressWriter(args.Bool("noProgress")),
+ })
+ checkErr(err)
}
func downloadSyncHandler(ctx cli.Context) {
- args := ctx.Args()
- cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
- err := newDrive(args).DownloadSync(drive.DownloadSyncArgs{
- Out: os.Stdout,
- Progress: progressWriter(args.Bool("noProgress")),
- Path: args.String("path"),
- RootId: args.String("fileId"),
- DryRun: args.Bool("dryRun"),
- DeleteExtraneous: args.Bool("deleteExtraneous"),
- Resolution: conflictResolution(args),
- Comparer: NewCachedMd5Comparer(cachePath),
- })
- checkErr(err)
+ args := ctx.Args()
+ cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
+ err := newDrive(args).DownloadSync(drive.DownloadSyncArgs{
+ Out: os.Stdout,
+ Progress: progressWriter(args.Bool("noProgress")),
+ Path: args.String("path"),
+ RootId: args.String("fileId"),
+ DryRun: args.Bool("dryRun"),
+ DeleteExtraneous: args.Bool("deleteExtraneous"),
+ Resolution: conflictResolution(args),
+ Comparer: NewCachedMd5Comparer(cachePath),
+ })
+ checkErr(err)
}
func downloadRevisionHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).DownloadRevision(drive.DownloadRevisionArgs{
- Out: os.Stdout,
- FileId: args.String("fileId"),
- RevisionId: args.String("revisionId"),
- Force: args.Bool("force"),
- Stdout: args.Bool("stdout"),
- Path: args.String("path"),
- Progress: progressWriter(args.Bool("noProgress")),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).DownloadRevision(drive.DownloadRevisionArgs{
+ Out: os.Stdout,
+ FileId: args.String("fileId"),
+ RevisionId: args.String("revisionId"),
+ Force: args.Bool("force"),
+ Stdout: args.Bool("stdout"),
+ Path: args.String("path"),
+ Progress: progressWriter(args.Bool("noProgress")),
+ })
+ checkErr(err)
}
func uploadHandler(ctx cli.Context) {
- args := ctx.Args()
- checkUploadArgs(args)
- err := newDrive(args).Upload(drive.UploadArgs{
- Out: os.Stdout,
- Progress: progressWriter(args.Bool("noProgress")),
- Path: args.String("path"),
- Name: args.String("name"),
- Parents: args.StringSlice("parent"),
- Mime: args.String("mime"),
- Recursive: args.Bool("recursive"),
- Share: args.Bool("share"),
- Delete: args.Bool("delete"),
- ChunkSize: args.Int64("chunksize"),
- })
- checkErr(err)
+ args := ctx.Args()
+ checkUploadArgs(args)
+ err := newDrive(args).Upload(drive.UploadArgs{
+ Out: os.Stdout,
+ Progress: progressWriter(args.Bool("noProgress")),
+ Path: args.String("path"),
+ Name: args.String("name"),
+ Parents: args.StringSlice("parent"),
+ Mime: args.String("mime"),
+ Recursive: args.Bool("recursive"),
+ Share: args.Bool("share"),
+ Delete: args.Bool("delete"),
+ ChunkSize: args.Int64("chunksize"),
+ })
+ checkErr(err)
}
func uploadStdinHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).UploadStream(drive.UploadStreamArgs{
- Out: os.Stdout,
- In: os.Stdin,
- Name: args.String("name"),
- Parents: args.StringSlice("parent"),
- Mime: args.String("mime"),
- Share: args.Bool("share"),
- ChunkSize: args.Int64("chunksize"),
- Progress: progressWriter(args.Bool("noProgress")),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).UploadStream(drive.UploadStreamArgs{
+ Out: os.Stdout,
+ In: os.Stdin,
+ Name: args.String("name"),
+ Parents: args.StringSlice("parent"),
+ Mime: args.String("mime"),
+ Share: args.Bool("share"),
+ ChunkSize: args.Int64("chunksize"),
+ Progress: progressWriter(args.Bool("noProgress")),
+ })
+ checkErr(err)
}
func uploadSyncHandler(ctx cli.Context) {
- args := ctx.Args()
- cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
- err := newDrive(args).UploadSync(drive.UploadSyncArgs{
- Out: os.Stdout,
- Progress: progressWriter(args.Bool("noProgress")),
- Path: args.String("path"),
- RootId: args.String("fileId"),
- DryRun: args.Bool("dryRun"),
- DeleteExtraneous: args.Bool("deleteExtraneous"),
- ChunkSize: args.Int64("chunksize"),
- Resolution: conflictResolution(args),
- Comparer: NewCachedMd5Comparer(cachePath),
- })
- checkErr(err)
+ args := ctx.Args()
+ cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
+ err := newDrive(args).UploadSync(drive.UploadSyncArgs{
+ Out: os.Stdout,
+ Progress: progressWriter(args.Bool("noProgress")),
+ Path: args.String("path"),
+ RootId: args.String("fileId"),
+ DryRun: args.Bool("dryRun"),
+ DeleteExtraneous: args.Bool("deleteExtraneous"),
+ ChunkSize: args.Int64("chunksize"),
+ Resolution: conflictResolution(args),
+ Comparer: NewCachedMd5Comparer(cachePath),
+ })
+ checkErr(err)
}
func updateHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).Update(drive.UpdateArgs{
- Out: os.Stdout,
- Id: args.String("fileId"),
- Path: args.String("path"),
- Name: args.String("name"),
- Parents: args.StringSlice("parent"),
- Mime: args.String("mime"),
- Progress: progressWriter(args.Bool("noProgress")),
- ChunkSize: args.Int64("chunksize"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).Update(drive.UpdateArgs{
+ Out: os.Stdout,
+ Id: args.String("fileId"),
+ Path: args.String("path"),
+ Name: args.String("name"),
+ Parents: args.StringSlice("parent"),
+ Mime: args.String("mime"),
+ Progress: progressWriter(args.Bool("noProgress")),
+ ChunkSize: args.Int64("chunksize"),
+ })
+ checkErr(err)
}
func infoHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).Info(drive.FileInfoArgs{
- Out: os.Stdout,
- Id: args.String("fileId"),
- SizeInBytes: args.Bool("sizeInBytes"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).Info(drive.FileInfoArgs{
+ Out: os.Stdout,
+ Id: args.String("fileId"),
+ SizeInBytes: args.Bool("sizeInBytes"),
+ })
+ checkErr(err)
}
func importHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).Import(drive.ImportArgs{
- Out: os.Stdout,
- Path: args.String("path"),
- Parents: args.StringSlice("parent"),
- Progress: progressWriter(args.Bool("noProgress")),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).Import(drive.ImportArgs{
+ Out: os.Stdout,
+ Path: args.String("path"),
+ Parents: args.StringSlice("parent"),
+ Progress: progressWriter(args.Bool("noProgress")),
+ })
+ checkErr(err)
}
func exportHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).Export(drive.ExportArgs{
- Out: os.Stdout,
- Id: args.String("fileId"),
- Mime: args.String("mime"),
- PrintMimes: args.Bool("printMimes"),
- Force: args.Bool("force"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).Export(drive.ExportArgs{
+ Out: os.Stdout,
+ Id: args.String("fileId"),
+ Mime: args.String("mime"),
+ PrintMimes: args.Bool("printMimes"),
+ Force: args.Bool("force"),
+ })
+ checkErr(err)
}
func listRevisionsHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).ListRevisions(drive.ListRevisionsArgs{
- Out: os.Stdout,
- Id: args.String("fileId"),
- NameWidth: args.Int64("nameWidth"),
- SizeInBytes: args.Bool("sizeInBytes"),
- SkipHeader: args.Bool("skipHeader"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).ListRevisions(drive.ListRevisionsArgs{
+ Out: os.Stdout,
+ Id: args.String("fileId"),
+ NameWidth: args.Int64("nameWidth"),
+ SizeInBytes: args.Bool("sizeInBytes"),
+ SkipHeader: args.Bool("skipHeader"),
+ })
+ checkErr(err)
}
func mkdirHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).Mkdir(drive.MkdirArgs{
- Out: os.Stdout,
- Name: args.String("name"),
- Parents: args.StringSlice("parent"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).Mkdir(drive.MkdirArgs{
+ Out: os.Stdout,
+ Name: args.String("name"),
+ Parents: args.StringSlice("parent"),
+ })
+ checkErr(err)
}
func shareHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).Share(drive.ShareArgs{
- Out: os.Stdout,
- FileId: args.String("fileId"),
- Role: args.String("role"),
- Type: args.String("type"),
- Email: args.String("email"),
- Discoverable: args.Bool("discoverable"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).Share(drive.ShareArgs{
+ Out: os.Stdout,
+ FileId: args.String("fileId"),
+ Role: args.String("role"),
+ Type: args.String("type"),
+ Email: args.String("email"),
+ Discoverable: args.Bool("discoverable"),
+ })
+ checkErr(err)
}
func shareListHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).ListPermissions(drive.ListPermissionsArgs{
- Out: os.Stdout,
- FileId: args.String("fileId"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).ListPermissions(drive.ListPermissionsArgs{
+ Out: os.Stdout,
+ FileId: args.String("fileId"),
+ })
+ checkErr(err)
}
func shareRevokeHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).RevokePermission(drive.RevokePermissionArgs{
- Out: os.Stdout,
- FileId: args.String("fileId"),
- PermissionId: args.String("permissionId"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).RevokePermission(drive.RevokePermissionArgs{
+ Out: os.Stdout,
+ FileId: args.String("fileId"),
+ PermissionId: args.String("permissionId"),
+ })
+ checkErr(err)
}
func deleteHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).Delete(drive.DeleteArgs{
- Out: os.Stdout,
- Id: args.String("fileId"),
- Recursive: args.Bool("recursive"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).Delete(drive.DeleteArgs{
+ Out: os.Stdout,
+ Id: args.String("fileId"),
+ Recursive: args.Bool("recursive"),
+ })
+ checkErr(err)
}
func listSyncHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).ListSync(drive.ListSyncArgs{
- Out: os.Stdout,
- SkipHeader: args.Bool("skipHeader"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).ListSync(drive.ListSyncArgs{
+ Out: os.Stdout,
+ SkipHeader: args.Bool("skipHeader"),
+ })
+ checkErr(err)
}
func listRecursiveSyncHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).ListRecursiveSync(drive.ListRecursiveSyncArgs{
- Out: os.Stdout,
- RootId: args.String("fileId"),
- SkipHeader: args.Bool("skipHeader"),
- PathWidth: args.Int64("pathWidth"),
- SizeInBytes: args.Bool("sizeInBytes"),
- SortOrder: args.String("sortOrder"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).ListRecursiveSync(drive.ListRecursiveSyncArgs{
+ Out: os.Stdout,
+ RootId: args.String("fileId"),
+ SkipHeader: args.Bool("skipHeader"),
+ PathWidth: args.Int64("pathWidth"),
+ SizeInBytes: args.Bool("sizeInBytes"),
+ SortOrder: args.String("sortOrder"),
+ })
+ checkErr(err)
}
func deleteRevisionHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{
- Out: os.Stdout,
- FileId: args.String("fileId"),
- RevisionId: args.String("revisionId"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{
+ Out: os.Stdout,
+ FileId: args.String("fileId"),
+ RevisionId: args.String("revisionId"),
+ })
+ checkErr(err)
}
func aboutHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).About(drive.AboutArgs{
- Out: os.Stdout,
- SizeInBytes: args.Bool("sizeInBytes"),
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).About(drive.AboutArgs{
+ Out: os.Stdout,
+ SizeInBytes: args.Bool("sizeInBytes"),
+ })
+ checkErr(err)
}
func aboutImportHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).AboutImport(drive.AboutImportArgs{
- Out: os.Stdout,
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).AboutImport(drive.AboutImportArgs{
+ Out: os.Stdout,
+ })
+ checkErr(err)
}
func aboutExportHandler(ctx cli.Context) {
- args := ctx.Args()
- err := newDrive(args).AboutExport(drive.AboutExportArgs{
- Out: os.Stdout,
- })
- checkErr(err)
+ args := ctx.Args()
+ err := newDrive(args).AboutExport(drive.AboutExportArgs{
+ Out: os.Stdout,
+ })
+ checkErr(err)
}
func getOauthClient(args cli.Arguments) (*http.Client, error) {
- if args.String("refreshToken") != "" && args.String("accessToken") != "" {
- ExitF("Access token not needed when refresh token is provided")
- }
+ if args.String("refreshToken") != "" && args.String("accessToken") != "" {
+ ExitF("Access token not needed when refresh token is provided")
+ }
- if args.String("refreshToken") != "" {
- return auth.NewRefreshTokenClient(ClientId, ClientSecret, args.String("refreshToken")), nil
- }
+ if args.String("refreshToken") != "" {
+ return auth.NewRefreshTokenClient(ClientId, ClientSecret, args.String("refreshToken")), nil
+ }
- if args.String("accessToken") != "" {
- return auth.NewAccessTokenClient(ClientId, ClientSecret, args.String("accessToken")), nil
- }
+ if args.String("accessToken") != "" {
+ return auth.NewAccessTokenClient(ClientId, ClientSecret, args.String("accessToken")), nil
+ }
- configDir := getConfigDir(args)
- tokenPath := ConfigFilePath(configDir, TokenFilename)
- return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt)
+ configDir := getConfigDir(args)
+ tokenPath := ConfigFilePath(configDir, TokenFilename)
+ return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt)
}
func getConfigDir(args cli.Arguments) string {
- // Use dir from environment var if present
- if os.Getenv("GDRIVE_CONFIG_DIR") != "" {
- return os.Getenv("GDRIVE_CONFIG_DIR")
- }
- return args.String("configDir")
+ // Use dir from environment var if present
+ if os.Getenv("GDRIVE_CONFIG_DIR") != "" {
+ return os.Getenv("GDRIVE_CONFIG_DIR")
+ }
+ return args.String("configDir")
}
func newDrive(args cli.Arguments) *drive.Drive {
- oauth, err := getOauthClient(args)
- if err != nil {
- ExitF("Failed getting oauth client: %s", err.Error())
- }
+ oauth, err := getOauthClient(args)
+ if err != nil {
+ ExitF("Failed getting oauth client: %s", err.Error())
+ }
- client, err := drive.New(oauth)
- if err != nil {
- ExitF("Failed getting drive: %s", err.Error())
- }
+ client, err := drive.New(oauth)
+ if err != nil {
+ ExitF("Failed getting drive: %s", err.Error())
+ }
- return client
+ return client
}
func authCodePrompt(url string) func() string {
- return func() string {
- fmt.Println("Authentication needed")
- fmt.Println("Go to the following url in your browser:")
- fmt.Printf("%s\n\n", url)
- fmt.Print("Enter verification code: ")
+ return func() string {
+ fmt.Println("Authentication needed")
+ fmt.Println("Go to the following url in your browser:")
+ fmt.Printf("%s\n\n", url)
+ fmt.Print("Enter verification code: ")
- var code string
- if _, err := fmt.Scan(&code); err != nil {
- fmt.Printf("Failed reading code: %s", err.Error())
- }
- return code
- }
+ var code string
+ if _, err := fmt.Scan(&code); err != nil {
+ fmt.Printf("Failed reading code: %s", err.Error())
+ }
+ return code
+ }
}
func progressWriter(discard bool) io.Writer {
- if discard {
- return ioutil.Discard
- }
- return os.Stderr
+ if discard {
+ return ioutil.Discard
+ }
+ return os.Stderr
}
func conflictResolution(args cli.Arguments) drive.ConflictResolution {
- keepLocal := args.Bool("keepLocal")
- keepRemote := args.Bool("keepRemote")
- keepLargest := args.Bool("keepLargest")
+ keepLocal := args.Bool("keepLocal")
+ keepRemote := args.Bool("keepRemote")
+ keepLargest := args.Bool("keepLargest")
- if (keepLocal && keepRemote) || (keepLocal && keepLargest) || (keepRemote && keepLargest) {
- ExitF("Only one conflict resolution flag can be given")
- }
+ if (keepLocal && keepRemote) || (keepLocal && keepLargest) || (keepRemote && keepLargest) {
+ ExitF("Only one conflict resolution flag can be given")
+ }
- if keepLocal {
- return drive.KeepLocal
- }
+ if keepLocal {
+ return drive.KeepLocal
+ }
- if keepRemote {
- return drive.KeepRemote
- }
+ if keepRemote {
+ return drive.KeepRemote
+ }
- if keepLargest {
- return drive.KeepLargest
- }
+ if keepLargest {
+ return drive.KeepLargest
+ }
- return drive.NoResolution
+ return drive.NoResolution
}
func checkUploadArgs(args cli.Arguments) {
- if args.Bool("recursive") && args.Bool("delete") {
- ExitF("--delete is not allowed for recursive uploads")
- }
+ if args.Bool("recursive") && args.Bool("delete") {
+ ExitF("--delete is not allowed for recursive uploads")
+ }
- if args.Bool("recursive") && args.Bool("share") {
- ExitF("--share is not allowed for recursive uploads")
- }
+ if args.Bool("recursive") && args.Bool("share") {
+ ExitF("--share is not allowed for recursive uploads")
+ }
}
func checkDownloadArgs(args cli.Arguments) {
- if args.Bool("recursive") && args.Bool("delete") {
- ExitF("--delete is not allowed for recursive downloads")
- }
+ if args.Bool("recursive") && args.Bool("delete") {
+ ExitF("--delete is not allowed for recursive downloads")
+ }
}
diff --git a/handlers_meta.go b/handlers_meta.go
index 52be710..72e3dd0 100644
--- a/handlers_meta.go
+++ b/handlers_meta.go
@@ -1,95 +1,95 @@
package main
import (
- "os"
+ "./cli"
"fmt"
- "strings"
+ "os"
"runtime"
- "text/tabwriter"
- "./cli"
+ "strings"
+ "text/tabwriter"
)
func printVersion(ctx cli.Context) {
- fmt.Printf("%s: %s\n", Name, Version)
- fmt.Printf("Golang: %s\n", runtime.Version())
- fmt.Printf("OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
+ fmt.Printf("%s: %s\n", Name, Version)
+ fmt.Printf("Golang: %s\n", runtime.Version())
+ fmt.Printf("OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
}
func printHelp(ctx cli.Context) {
- w := new(tabwriter.Writer)
- w.Init(os.Stdout, 0, 0, 3, ' ', 0)
+ w := new(tabwriter.Writer)
+ w.Init(os.Stdout, 0, 0, 3, ' ', 0)
- fmt.Fprintf(w, "%s usage:\n\n", Name)
+ fmt.Fprintf(w, "%s usage:\n\n", Name)
- for _, h := range ctx.Handlers() {
- fmt.Fprintf(w, "%s %s\t%s\n", Name, h.Pattern, h.Description)
- }
+ for _, h := range ctx.Handlers() {
+ fmt.Fprintf(w, "%s %s\t%s\n", Name, h.Pattern, h.Description)
+ }
- w.Flush()
+ w.Flush()
}
func printCommandHelp(ctx cli.Context) {
- args := ctx.Args()
- printCommandPrefixHelp(ctx, args.String("command"))
+ args := ctx.Args()
+ printCommandPrefixHelp(ctx, args.String("command"))
}
func printSubCommandHelp(ctx cli.Context) {
- args := ctx.Args()
- printCommandPrefixHelp(ctx, args.String("command"), args.String("subcommand"))
+ args := ctx.Args()
+ printCommandPrefixHelp(ctx, args.String("command"), args.String("subcommand"))
}
func printCommandPrefixHelp(ctx cli.Context, prefix ...string) {
- handler := getHandler(ctx.Handlers(), prefix)
-
- if handler == nil {
- ExitF("Command not found")
- }
-
- w := new(tabwriter.Writer)
- w.Init(os.Stdout, 0, 0, 3, ' ', 0)
-
- fmt.Fprintf(w, "%s\n", handler.Description)
- fmt.Fprintf(w, "%s %s\n", Name, handler.Pattern)
- for _, group := range handler.FlagGroups {
- fmt.Fprintf(w, "\n%s:\n", group.Name)
- for _, flag := range group.Flags {
- boolFlag, isBool := flag.(cli.BoolFlag)
- if isBool && boolFlag.OmitValue {
- fmt.Fprintf(w, " %s\t%s\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription())
- } else {
- fmt.Fprintf(w, " %s <%s>\t%s\n", strings.Join(flag.GetPatterns(), ", "), flag.GetName(), flag.GetDescription())
- }
- }
- }
-
- w.Flush()
+ handler := getHandler(ctx.Handlers(), prefix)
+
+ if handler == nil {
+ ExitF("Command not found")
+ }
+
+ w := new(tabwriter.Writer)
+ w.Init(os.Stdout, 0, 0, 3, ' ', 0)
+
+ fmt.Fprintf(w, "%s\n", handler.Description)
+ fmt.Fprintf(w, "%s %s\n", Name, handler.Pattern)
+ for _, group := range handler.FlagGroups {
+ fmt.Fprintf(w, "\n%s:\n", group.Name)
+ for _, flag := range group.Flags {
+ boolFlag, isBool := flag.(cli.BoolFlag)
+ if isBool && boolFlag.OmitValue {
+ fmt.Fprintf(w, " %s\t%s\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription())
+ } else {
+ fmt.Fprintf(w, " %s <%s>\t%s\n", strings.Join(flag.GetPatterns(), ", "), flag.GetName(), flag.GetDescription())
+ }
+ }
+ }
+
+ w.Flush()
}
func getHandler(handlers []*cli.Handler, prefix []string) *cli.Handler {
- for _, h := range handlers {
- pattern := stripOptionals(h.SplitPattern())
+ for _, h := range handlers {
+ pattern := stripOptionals(h.SplitPattern())
- if len(prefix) > len(pattern) {
- continue
- }
+ if len(prefix) > len(pattern) {
+ continue
+ }
- if equal(prefix, pattern[:len(prefix)]) {
- return h
- }
- }
+ if equal(prefix, pattern[:len(prefix)]) {
+ return h
+ }
+ }
- return nil
+ return nil
}
// Strip optional groups ([...]) from pattern
func stripOptionals(pattern []string) []string {
- newArgs := []string{}
-
- for _, arg := range pattern {
- if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") {
- continue
- }
- newArgs = append(newArgs, arg)
- }
- return newArgs
+ newArgs := []string{}
+
+ for _, arg := range pattern {
+ if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") {
+ continue
+ }
+ newArgs = append(newArgs, arg)
+ }
+ return newArgs
}
diff --git a/util.go b/util.go
index 041daed..dbdd394 100644
--- a/util.go
+++ b/util.go
@@ -1,21 +1,21 @@
package main
import (
- "runtime"
- "path/filepath"
- "fmt"
- "encoding/json"
- "os"
- "io"
- "crypto/md5"
+ "crypto/md5"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
)
func GetDefaultConfigDir() string {
- return filepath.Join(Homedir(), ".gdrive")
+ return filepath.Join(Homedir(), ".gdrive")
}
func ConfigFilePath(basePath, name string) string {
- return filepath.Join(basePath, name)
+ return filepath.Join(basePath, name)
}
func Homedir() string {
@@ -26,25 +26,25 @@ func Homedir() string {
}
func equal(a, b []string) bool {
- if a == nil && b == nil {
- return true;
- }
+ if a == nil && b == nil {
+ return true
+ }
- if a == nil || b == nil {
- return false;
- }
+ if a == nil || b == nil {
+ return false
+ }
- if len(a) != len(b) {
- return false
- }
+ if len(a) != len(b) {
+ return false
+ }
- for i := range a {
- if a[i] != b[i] {
- return false
- }
- }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
- return true
+ return true
}
func ExitF(format string, a ...interface{}) {
@@ -54,37 +54,37 @@ func ExitF(format string, a ...interface{}) {
}
func checkErr(err error) {
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
}
func writeJson(path string, data interface{}) error {
- tmpFile := path + ".tmp"
- f, err := os.Create(tmpFile)
- if err != nil {
- return err
- }
-
- err = json.NewEncoder(f).Encode(data)
- f.Close()
- if err != nil {
- os.Remove(tmpFile)
- return err
- }
-
- return os.Rename(tmpFile, path)
+ tmpFile := path + ".tmp"
+ f, err := os.Create(tmpFile)
+ if err != nil {
+ return err
+ }
+
+ err = json.NewEncoder(f).Encode(data)
+ f.Close()
+ if err != nil {
+ os.Remove(tmpFile)
+ return err
+ }
+
+ return os.Rename(tmpFile, path)
}
func md5sum(path string) string {
- h := md5.New()
- f, err := os.Open(path)
- if err != nil {
- return ""
- }
- defer f.Close()
-
- io.Copy(h, f)
- return fmt.Sprintf("%x", h.Sum(nil))
+ h := md5.New()
+ f, err := os.Open(path)
+ if err != nil {
+ return ""
+ }
+ defer f.Close()
+
+ io.Copy(h, f)
+ return fmt.Sprintf("%x", h.Sum(nil))
}