From 137298be0c1ec3e95825971fa25f0877ec884ad4 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 11 Jan 2016 21:36:33 +0100 Subject: [PATCH 001/195] Basic list and download support --- .gitignore | 2 +- cli/cli.go | 509 --------------------------------------------- cli/context.go | 41 ++++ cli/flags.go | 132 ++++++++++++ cli/handler.go | 125 +++++++++++ cli/parser.go | 307 +++++++++++++++++++++++++++ client/auth.go | 32 +++ client/client.go | 28 +++ drive.go | 2 +- drive/files.go | 68 ++++++ drive/types.go | 37 ++++ drive/util.go | 20 ++ gdrive.go | 270 ++++++++++++++++++++++++ gdrive/handlers.go | 0 handlers.go | 100 +++++++++ util.go | 29 +++ 16 files changed, 1191 insertions(+), 511 deletions(-) delete mode 100644 cli/cli.go create mode 100644 cli/context.go create mode 100644 cli/flags.go create mode 100644 cli/handler.go create mode 100644 cli/parser.go create mode 100644 client/auth.go create mode 100644 client/client.go create mode 100644 drive/files.go create mode 100644 drive/types.go create mode 100644 drive/util.go create mode 100644 gdrive.go create mode 100644 gdrive/handlers.go create mode 100644 handlers.go create mode 100644 util.go diff --git a/.gitignore b/.gitignore index 6b18ed86..36d220a7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,9 @@ # Ignore bin folder and drive binary _release/bin -drive # vim files .*.sw[a-z] *.un~ Session.vim .netrwhist +drive_old diff --git a/cli/cli.go b/cli/cli.go deleted file mode 100644 index b6628413..00000000 --- a/cli/cli.go +++ /dev/null @@ -1,509 +0,0 @@ -package cli - -import ( - "fmt" - "github.com/prasmussen/gdrive/gdrive" - "github.com/prasmussen/gdrive/util" - "github.com/prasmussen/google-api-go-client/drive/v2" - "golang.org/x/net/context" - "io" - "mime" - "os" - "path/filepath" - "strings" -) - -// List of google docs mime types excluding vnd.google-apps.folder -var googleMimeTypes = []string{ - "application/vnd.google-apps.audio", - "application/vnd.google-apps.document", - "application/vnd.google-apps.drawing", - "application/vnd.google-apps.file", - "application/vnd.google-apps.form", - "application/vnd.google-apps.fusiontable", - "application/vnd.google-apps.photo", - "application/vnd.google-apps.presentation", - "application/vnd.google-apps.script", - "application/vnd.google-apps.sites", - "application/vnd.google-apps.spreadsheet", - "application/vnd.google-apps.unknown", - "application/vnd.google-apps.video", - "application/vnd.google-apps.map", -} - -func List(d *gdrive.Drive, query, titleFilter string, maxResults int, sharedStatus, noHeader, includeDocs, sizeInBytes bool) error { - caller := d.Files.List() - queryList := []string{} - - if maxResults > 0 { - caller.MaxResults(int64(maxResults)) - } - - if titleFilter != "" { - q := fmt.Sprintf("title contains '%s'", titleFilter) - queryList = append(queryList, q) - } - - if query != "" { - queryList = append(queryList, query) - } else { - // Skip trashed files - queryList = append(queryList, "trashed = false") - - // Skip google docs - if !includeDocs { - for _, mime := range googleMimeTypes { - q := fmt.Sprintf("mimeType != '%s'", mime) - queryList = append(queryList, q) - } - } - } - - if len(queryList) > 0 { - q := strings.Join(queryList, " and ") - caller.Q(q) - } - - list, err := caller.Do() - if err != nil { - return err - } - - files := list.Items - - for list.NextPageToken != "" { - if maxResults > 0 && len(files) > maxResults { - break - } - - caller.PageToken(list.NextPageToken) - list, err = caller.Do() - if err != nil { - return err - } - 
files = append(files, list.Items...) - } - - items := make([]map[string]string, 0, 0) - - for _, f := range files { - if maxResults > 0 && len(items) >= maxResults { - break - } - - items = append(items, map[string]string{ - "Id": f.Id, - "Title": util.TruncateString(f.Title, 40), - "Size": util.FileSizeFormat(f.FileSize, sizeInBytes), - "Created": util.ISODateToLocal(f.CreatedDate), - }) - } - - columnOrder := []string{"Id", "Title", "Size", "Created"} - - if sharedStatus { - addSharedStatus(d, items) - columnOrder = append(columnOrder, "Shared") - } - - util.PrintColumns(items, columnOrder, 3, noHeader) - return nil -} - -// Adds the key-value-pair 'Shared: True/False' to the map -func addSharedStatus(d *gdrive.Drive, items []map[string]string) { - // Limit to 10 simultaneous requests - active := make(chan bool, 10) - done := make(chan bool) - - // Closure that performs the check - checkStatus := func(item map[string]string) { - // Wait for an empty spot in the active queue - active <- true - - // Perform request - shared := isShared(d, item["Id"]) - item["Shared"] = util.FormatBool(shared) - - // Decrement the active queue and notify that we are done - <-active - done <- true - } - - // Go, go, go! - for _, item := range items { - go checkStatus(item) - } - - // Wait for all goroutines to finish - for i := 0; i < len(items); i++ { - <-done - } -} - -func Info(d *gdrive.Drive, fileId string, sizeInBytes bool) error { - info, err := d.Files.Get(fileId).Do() - if err != nil { - return fmt.Errorf("An error occurred: %v\n", err) - } - printInfo(d, info, sizeInBytes) - return nil -} - -func printInfo(d *gdrive.Drive, f *drive.File, sizeInBytes bool) { - fields := map[string]string{ - "Id": f.Id, - "Title": f.Title, - "Description": f.Description, - "Size": util.FileSizeFormat(f.FileSize, sizeInBytes), - "Created": util.ISODateToLocal(f.CreatedDate), - "Modified": util.ISODateToLocal(f.ModifiedDate), - "Owner": strings.Join(f.OwnerNames, ", "), - "Md5sum": f.Md5Checksum, - "Shared": util.FormatBool(isShared(d, f.Id)), - "Parents": util.ParentList(f.Parents), - } - - order := []string{ - "Id", - "Title", - "Description", - "Size", - "Created", - "Modified", - "Owner", - "Md5sum", - "Shared", - "Parents", - } - util.Print(fields, order) -} - -// Create folder in drive -func Folder(d *gdrive.Drive, title string, parentId string, share bool) error { - info, err := makeFolder(d, title, parentId, share) - if err != nil { - return err - } - printInfo(d, info, false) - fmt.Printf("Folder '%s' created\n", info.Title) - return nil -} - -func makeFolder(d *gdrive.Drive, title string, parentId string, share bool) (*drive.File, error) { - // File instance - f := &drive.File{Title: title, MimeType: "application/vnd.google-apps.folder"} - // Set parent (if provided) - if parentId != "" { - p := &drive.ParentReference{Id: parentId} - f.Parents = []*drive.ParentReference{p} - } - // Create folder - info, err := d.Files.Insert(f).Do() - if err != nil { - return nil, fmt.Errorf("An error occurred creating the folder: %v\n", err) - } - // Share folder if the share flag was provided - if share { - Share(d, info.Id) - } - return info, err -} - -// Upload file to drive -func UploadStdin(d *gdrive.Drive, input io.ReadCloser, title string, parentId string, share bool, mimeType string, convert bool) error { - // File instance - f := &drive.File{Title: title} - // Set parent (if provided) - if parentId != "" { - p := &drive.ParentReference{Id: parentId} - f.Parents = []*drive.ParentReference{p} - } - getRate := 
util.MeasureTransferRate() - - if convert { - fmt.Printf("Converting to Google Docs format enabled\n") - } - - info, err := d.Files.Insert(f).Convert(convert).Media(input).Do() - if err != nil { - return fmt.Errorf("An error occurred uploading the document: %v\n", err) - } - - // Total bytes transferred - bytes := info.FileSize - - // Print information about uploaded file - printInfo(d, info, false) - fmt.Printf("MIME Type: %s\n", mimeType) - fmt.Printf("Uploaded '%s' at %s, total %s\n", info.Title, getRate(bytes), util.FileSizeFormat(bytes, false)) - - // Share file if the share flag was provided - if share { - err = Share(d, info.Id) - } - return err -} - -func Upload(d *gdrive.Drive, input *os.File, title string, parentId string, share bool, mimeType string, convert bool) error { - // Grab file info - inputInfo, err := input.Stat() - if err != nil { - return err - } - - if inputInfo.IsDir() { - return uploadDirectory(d, input, inputInfo, title, parentId, share, mimeType, convert) - } else { - return uploadFile(d, input, inputInfo, title, parentId, share, mimeType, convert) - } - - return nil -} - -func uploadDirectory(d *gdrive.Drive, input *os.File, inputInfo os.FileInfo, title string, parentId string, share bool, mimeType string, convert bool) error { - // Create folder - folder, err := makeFolder(d, filepath.Base(inputInfo.Name()), parentId, share) - if err != nil { - return err - } - - // Read all files in directory - files, err := input.Readdir(0) - if err != nil { - return err - } - - // Get current dir - currentDir, err := os.Getwd() - if err != nil { - return err - } - - // Go into directory - dstDir := filepath.Join(currentDir, inputInfo.Name()) - err = os.Chdir(dstDir) - if err != nil { - return err - } - - // Change back to original directory when done - defer func() { - os.Chdir(currentDir) - }() - - for _, fi := range files { - f, err := os.Open(fi.Name()) - if err != nil { - return err - } - - if fi.IsDir() { - err = uploadDirectory(d, f, fi, "", folder.Id, share, mimeType, convert) - } else { - err = uploadFile(d, f, fi, "", folder.Id, share, mimeType, convert) - } - - if err != nil { - return err - } - } - - return nil -} - -func uploadFile(d *gdrive.Drive, input *os.File, inputInfo os.FileInfo, title string, parentId string, share bool, mimeType string, convert bool) error { - if title == "" { - title = filepath.Base(inputInfo.Name()) - } - - if mimeType == "" { - mimeType = mime.TypeByExtension(filepath.Ext(title)) - } - - // File instance - f := &drive.File{Title: title, MimeType: mimeType} - // Set parent (if provided) - if parentId != "" { - p := &drive.ParentReference{Id: parentId} - f.Parents = []*drive.ParentReference{p} - } - getRate := util.MeasureTransferRate() - - if convert { - fmt.Printf("Converting to Google Docs format enabled\n") - } - - info, err := d.Files.Insert(f).Convert(convert).ResumableMedia(context.Background(), input, inputInfo.Size(), mimeType).Do() - if err != nil { - return fmt.Errorf("An error occurred uploading the document: %v\n", err) - } - - // Total bytes transferred - bytes := info.FileSize - - // Print information about uploaded file - printInfo(d, info, false) - fmt.Printf("MIME Type: %s\n", mimeType) - fmt.Printf("Uploaded '%s' at %s, total %s\n", info.Title, getRate(bytes), util.FileSizeFormat(bytes, false)) - - // Share file if the share flag was provided - if share { - err = Share(d, info.Id) - } - return err -} - -func DownloadLatest(d *gdrive.Drive, stdout bool, format string, force bool) error { - list, err := 
d.Files.List().Do() - if err != nil { - return err - } - - if len(list.Items) == 0 { - return fmt.Errorf("No files found") - } - - latestId := list.Items[0].Id - return Download(d, latestId, stdout, true, format, force) -} - -// Download file from drive -func Download(d *gdrive.Drive, fileId string, stdout, deleteAfterDownload bool, format string, force bool) error { - // Get file info - info, err := d.Files.Get(fileId).Do() - if err != nil { - return fmt.Errorf("An error occurred: %v\n", err) - } - - downloadUrl, extension, err := util.InternalDownloadUrlAndExtension(info, format) - if err != nil { - return err - } - - // Measure transfer rate - getRate := util.MeasureTransferRate() - - // GET the download url - res, err := d.Client().Get(downloadUrl) - if err != nil { - return fmt.Errorf("An error occurred: %v", err) - } - - // Close body on function exit - defer res.Body.Close() - - // Write file content to stdout - if stdout { - io.Copy(os.Stdout, res.Body) - return nil - } - - fileName := fmt.Sprintf("%s%s", info.Title, extension) - - // Check if file exists - if !force && util.FileExists(fileName) { - return fmt.Errorf("An error occurred: '%s' already exists", fileName) - } - - // Create a new file - outFile, err := os.Create(fileName) - if err != nil { - return fmt.Errorf("An error occurred: %v\n", err) - } - - // Close file on function exit - defer outFile.Close() - - // Save file to disk - bytes, err := io.Copy(outFile, res.Body) - if err != nil { - return fmt.Errorf("An error occurred: %s", err) - } - - fmt.Printf("Downloaded '%s' at %s, total %s\n", fileName, getRate(bytes), util.FileSizeFormat(bytes, false)) - - if deleteAfterDownload { - err = Delete(d, fileId) - } - return err -} - -// Delete file with given file id -func Delete(d *gdrive.Drive, fileId string) error { - info, err := d.Files.Get(fileId).Do() - if err != nil { - return fmt.Errorf("An error occurred: %v\n", err) - } - - if err := d.Files.Delete(fileId).Do(); err != nil { - return fmt.Errorf("An error occurred: %v\n", err) - - } - - fmt.Printf("Removed file '%s'\n", info.Title) - return nil -} - -// Make given file id readable by anyone -- auth not required to view/download file -func Share(d *gdrive.Drive, fileId string) error { - info, err := d.Files.Get(fileId).Do() - if err != nil { - return fmt.Errorf("An error occurred: %v\n", err) - } - - perm := &drive.Permission{ - Value: "me", - Type: "anyone", - Role: "reader", - } - - if _, err := d.Permissions.Insert(fileId, perm).Do(); err != nil { - return fmt.Errorf("An error occurred: %v\n", err) - } - - fmt.Printf("File '%s' is now readable by everyone @ %s\n", info.Title, util.PreviewUrl(fileId)) - return nil -} - -// Removes the 'anyone' permission -- auth will be required to view/download file -func Unshare(d *gdrive.Drive, fileId string) error { - info, err := d.Files.Get(fileId).Do() - if err != nil { - return fmt.Errorf("An error occurred: %v\n", err) - } - - if err := d.Permissions.Delete(fileId, "anyone").Do(); err != nil { - return fmt.Errorf("An error occurred: %v\n", err) - } - - fmt.Printf("File '%s' is no longer shared to 'anyone'\n", info.Title) - return nil -} - -func Quota(d *gdrive.Drive, sizeInBytes bool) error { - info, err := d.About.Get().Do() - if err != nil { - return fmt.Errorf("An error occurred: %v\n", err) - } - - fmt.Printf("Used: %s\n", util.FileSizeFormat(info.QuotaBytesUsed, sizeInBytes)) - fmt.Printf("Free: %s\n", util.FileSizeFormat(info.QuotaBytesTotal-info.QuotaBytesUsed, sizeInBytes)) - fmt.Printf("Total: %s\n", 
util.FileSizeFormat(info.QuotaBytesTotal, sizeInBytes)) - return nil -} - -func isShared(d *gdrive.Drive, fileId string) bool { - r, err := d.Permissions.List(fileId).Do() - if err != nil { - fmt.Printf("An error occurred: %v\n", err) - os.Exit(1) - } - - for _, perm := range r.Items { - if perm.Type == "anyone" { - return true - } - } - return false -} diff --git a/cli/context.go b/cli/context.go new file mode 100644 index 00000000..b1037b09 --- /dev/null +++ b/cli/context.go @@ -0,0 +1,41 @@ +package cli + +import ( + "strconv" +) + +type Context struct { + args Arguments + handlers []*Handler +} + +func (self Context) Args() Arguments { + return self.args +} + +func (self Context) Handlers() []*Handler { + return self.handlers +} + +func (self Context) FilterHandlers(prefix string) []*Handler { + return filterHandlers(self.handlers, prefix) +} + +type Arguments map[string]string + +func (self Arguments) String(key string) string { + value, _ := self[key] + return value +} + +func (self Arguments) Int64(key string) int64 { + value, _ := self[key] + n, _ := strconv.ParseInt(value, 10, 64) + return n +} + +func (self Arguments) Bool(key string) bool { + value, _ := self[key] + b, _ := strconv.ParseBool(value) + return b +} diff --git a/cli/flags.go b/cli/flags.go new file mode 100644 index 00000000..a5aa2761 --- /dev/null +++ b/cli/flags.go @@ -0,0 +1,132 @@ +package cli + +// TODO +// Default values? Default string values? Parser must always return a value +// Support invalid flag combinations? + + +type Flag interface { + GetPatterns() []string + GetName() string + GetDescription() string + GetParser() Parser +} + +func getFlagParser(flags []Flag) Parser { + var parsers []Parser + + for _, flag := range flags { + parsers = append(parsers, flag.GetParser()) + } + + return FlagParser{parsers} +} + + +type BoolFlag struct { + Patterns []string + Name string + Description string + DefaultValue bool + OmitValue bool +} + +func (self BoolFlag) GetName() string { + return self.Name +} + +func (self BoolFlag) GetPatterns() []string { + return self.Patterns +} + +func (self BoolFlag) GetDescription() string { + return self.Description +} + +func (self BoolFlag) GetParser() Parser { + var parsers []Parser + for _, p := range self.Patterns { + parsers = append(parsers, BoolFlagParser{ + pattern: p, + key: self.Name, + omitValue: self.OmitValue, + defaultValue: self.DefaultValue, + }) + } + + if len(parsers) == 1 { + return parsers[0] + } + return ShortCircuitParser{parsers} +} + + +type StringFlag struct { + Patterns []string + Name string + Description string + DefaultValue string +} + +func (self StringFlag) GetName() string { + return self.Name +} + +func (self StringFlag) GetPatterns() []string { + return self.Patterns +} + +func (self StringFlag) GetDescription() string { + return self.Description +} + +func (self StringFlag) GetParser() Parser { + var parsers []Parser + for _, p := range self.Patterns { + parsers = append(parsers, StringFlagParser{ + pattern: p, + key: self.Name, + defaultValue: self.DefaultValue, + }) + } + + if len(parsers) == 1 { + return parsers[0] + } + return ShortCircuitParser{parsers} +} + +type IntFlag struct { + Patterns []string + Name string + Description string + DefaultValue int64 +} + +func (self IntFlag) GetName() string { + return self.Name +} + +func (self IntFlag) GetPatterns() []string { + return self.Patterns +} + +func (self IntFlag) GetDescription() string { + return self.Description +} + +func (self IntFlag) GetParser() Parser { + var parsers []Parser 
+ for _, p := range self.Patterns { + parsers = append(parsers, IntFlagParser{ + pattern: p, + key: self.Name, + defaultValue: self.DefaultValue, + }) + } + + if len(parsers) == 1 { + return parsers[0] + } + return ShortCircuitParser{parsers} +} diff --git a/cli/handler.go b/cli/handler.go new file mode 100644 index 00000000..5cd13f80 --- /dev/null +++ b/cli/handler.go @@ -0,0 +1,125 @@ +package cli + +import ( + "fmt" + "regexp" + "strings" +) + +type Flags map[string][]Flag + +var handlers []*Handler + +type Handler struct { + Pattern string + Flags Flags + Callback func(Context) + Description string +} + +func (self *Handler) getParser() Parser { + var parsers []Parser + + for _, pattern := range splitPattern(self.Pattern) { + if isOptional(pattern) { + name := optionalName(pattern) + parser := getFlagParser(self.Flags[name]) + parsers = append(parsers, parser) + } else if isCaptureGroup(pattern) { + parsers = append(parsers, CaptureGroupParser{pattern}) + } else { + parsers = append(parsers, EqualParser{pattern}) + } + } + + return CompleteParser{parsers} +} + +func SetHandlers(h []*Handler) { + handlers = h +} + +func AddHandler(pattern string, flags Flags, callback func(Context), desc string) { + handlers = append(handlers, &Handler{ + Pattern: pattern, + Flags: flags, + Callback: callback, + Description: desc, + }) +} + +func findHandler(args []string) *Handler { + for _, h := range handlers { + if _, ok := h.getParser().Match(args); ok { + return h + } + } + return nil +} + + +func Handle(args []string) bool { + h := findHandler(args) + if h == nil { + return false + } + + _, data := h.getParser().Capture(args) + fmt.Println(data) + ctx := Context{ + args: data, + handlers: handlers, + } + h.Callback(ctx) + return true +} + +func filterHandlers(handlers []*Handler, prefix string) []*Handler { + matches := []*Handler{} + + for _, h := range handlers { + pattern := strings.Join(stripOptionals(splitPattern(h.Pattern)), " ") + if strings.HasPrefix(pattern, prefix) { + matches = append(matches, h) + } + } + + return matches +} + + +// Split on spaces but ignore spaces inside <...> and [...] 
+func splitPattern(pattern string) []string { + re := regexp.MustCompile(`(<[^>]+>|\[[^\]]+]|\S+)`) + matches := []string{} + + for _, value := range re.FindAllStringSubmatch(pattern, -1) { + matches = append(matches, value[1]) + } + + return matches +} + +func isCaptureGroup(arg string) bool { + return strings.HasPrefix(arg, "<") && strings.HasSuffix(arg, ">") +} + +func isOptional(arg string) bool { + return strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") +} + +func optionalName(s string) string { + return s[1:len(s) - 1] +} + +// Strip optional groups from pattern +func stripOptionals(pattern []string) []string { + newArgs := []string{} + + for _, arg := range pattern { + if !isOptional(arg) { + newArgs = append(newArgs, arg) + } + } + return newArgs +} diff --git a/cli/parser.go b/cli/parser.go new file mode 100644 index 00000000..433a4b1b --- /dev/null +++ b/cli/parser.go @@ -0,0 +1,307 @@ +package cli + +import ( + "fmt" + "strconv" +) + +type Parser interface { + Match([]string) ([]string, bool) + Capture([]string) ([]string, map[string]string) +} + +type CompleteParser struct { + parsers []Parser +} + +func (self CompleteParser) Match(values []string) ([]string, bool) { + remainingValues := values + + for _, parser := range self.parsers { + var ok bool + remainingValues, ok = parser.Match(remainingValues) + if !ok { + return remainingValues, false + } + } + + return remainingValues, len(remainingValues) == 0 +} + +func (self CompleteParser) Capture(values []string) ([]string, map[string]string) { + remainingValues := values + data := map[string]string{} + + for _, parser := range self.parsers { + var captured map[string]string + remainingValues, captured = parser.Capture(remainingValues) + for key, value := range captured { + data[key] = value + } + } + + return remainingValues, data +} + +func (self CompleteParser) String() string { + return fmt.Sprintf("CompleteParser %v", self.parsers) +} + + +type EqualParser struct { + value string +} + +func (self EqualParser) Match(values []string) ([]string, bool) { + if len(values) == 0 { + return values, false + } + + if self.value == values[0] { + return values[1:], true + } + + return values, false +} + +func (self EqualParser) Capture(values []string) ([]string, map[string]string) { + remainingValues, _ := self.Match(values) + return remainingValues, nil +} + +func (self EqualParser) String() string { + return fmt.Sprintf("EqualParser '%s'", self.value) +} + + +type CaptureGroupParser struct { + value string +} + +func (self CaptureGroupParser) Match(values []string) ([]string, bool) { + if len(values) == 0 { + return values, false + } + + return values[1:], true +} + +func (self CaptureGroupParser) key() string { + return self.value[1:len(self.value) - 1] +} + +func (self CaptureGroupParser) Capture(values []string) ([]string, map[string]string) { + if remainingValues, ok := self.Match(values); ok { + return remainingValues, map[string]string{self.key(): values[0]} + } + + return values, nil +} + +func (self CaptureGroupParser) String() string { + return fmt.Sprintf("CaptureGroupParser '%s'", self.value) +} + + + +type BoolFlagParser struct { + pattern string + key string + omitValue bool + defaultValue bool +} + +func (self BoolFlagParser) Match(values []string) ([]string, bool) { + if self.omitValue { + if len(values) == 0 { + return values, false + } + + if self.pattern == values[0] { + return values[1:], true + } + + return values, false + } else { + if len(values) < 2 { + return values, false + } + + if self.pattern != 
values[0] { + return values, false + } + + // Check that value is a valid boolean + if _, err := strconv.ParseBool(values[1]); err != nil { + return values, false + } + + return values[2:], true + } +} + +func (self BoolFlagParser) Capture(values []string) ([]string, map[string]string) { + remainingValues, ok := self.Match(values) + if !ok && !self.omitValue { + return remainingValues, map[string]string{self.key: fmt.Sprintf("%t", self.defaultValue)} + } + return remainingValues, map[string]string{self.key: fmt.Sprintf("%t", ok)} +} + +func (self BoolFlagParser) String() string { + return fmt.Sprintf("BoolFlagParser '%s'", self.pattern) +} + +type StringFlagParser struct { + pattern string + key string + defaultValue string +} + +func (self StringFlagParser) Match(values []string) ([]string, bool) { + if len(values) < 2 { + return values, false + } + + if self.pattern != values[0] { + return values, false + } + + return values[2:], true +} + +func (self StringFlagParser) Capture(values []string) ([]string, map[string]string) { + remainingValues, ok := self.Match(values) + if ok { + return remainingValues, map[string]string{self.key: values[1]} + } + + return values, map[string]string{self.key: self.defaultValue} +} + +func (self StringFlagParser) String() string { + return fmt.Sprintf("StringFlagParser '%s'", self.pattern) +} + +type IntFlagParser struct { + pattern string + key string + defaultValue int64 +} + +func (self IntFlagParser) Match(values []string) ([]string, bool) { + if len(values) < 2 { + return values, false + } + + if self.pattern != values[0] { + return values, false + } + + // Check that value is a valid integer + if _, err := strconv.ParseInt(values[1], 10, 64); err != nil { + return values, false + } + + return values[2:], true +} + +func (self IntFlagParser) Capture(values []string) ([]string, map[string]string) { + remainingValues, ok := self.Match(values) + if ok { + return remainingValues, map[string]string{self.key: values[1]} + } + + return values, map[string]string{self.key: fmt.Sprintf("%d", self.defaultValue)} +} + +func (self IntFlagParser) String() string { + return fmt.Sprintf("IntFlagParser '%s'", self.pattern) +} + + +type FlagParser struct { + parsers []Parser +} + +func (self FlagParser) Match(values []string) ([]string, bool) { + remainingValues := values + var oneOrMoreMatches bool + + for _, parser := range self.parsers { + var ok bool + remainingValues, ok = parser.Match(remainingValues) + if ok { + oneOrMoreMatches = true + } + } + + // Recurse while we have one or more matches + if oneOrMoreMatches { + return self.Match(remainingValues) + } + + return remainingValues, true +} + +func (self FlagParser) Capture(values []string) ([]string, map[string]string) { + data := map[string]string{} + remainingValues := values + + for _, parser := range self.parsers { + var captured map[string]string + remainingValues, captured = parser.Capture(remainingValues) + for key, value := range captured { + // Skip value if it already exists and new value is an empty string + if _, exists := data[key]; exists && value == "" { + continue + } + + data[key] = value + } + } + return remainingValues, data +} + +func (self FlagParser) String() string { + return fmt.Sprintf("FlagParser %v", self.parsers) +} + + +type ShortCircuitParser struct { + parsers []Parser +} + +func (self ShortCircuitParser) Match(values []string) ([]string, bool) { + remainingValues := values + + for _, parser := range self.parsers { + var ok bool + remainingValues, ok = 
parser.Match(remainingValues) + if ok { + return remainingValues, true + } + } + + return remainingValues, false +} + +func (self ShortCircuitParser) Capture(values []string) ([]string, map[string]string) { + if len(self.parsers) == 0 { + return values, nil + } + + for _, parser := range self.parsers { + if _, ok := parser.Match(values); ok { + return parser.Capture(values) + } + } + + // No parsers matched at this point, + // just return the capture value of the first one + return self.parsers[0].Capture(values) +} + +func (self ShortCircuitParser) String() string { + return fmt.Sprintf("ShortCircuitParser %v", self.parsers) +} diff --git a/client/auth.go b/client/auth.go new file mode 100644 index 00000000..fb6852dc --- /dev/null +++ b/client/auth.go @@ -0,0 +1,32 @@ +package client + +import ( + "net/http" + "golang.org/x/oauth2" + "go4.org/oauthutil" +) + +type authCodeFn func(string) func() string + +func NewOauthClient(clientId, clientSecret, cacheFile string, authFn authCodeFn) *http.Client { + conf := &oauth2.Config{ + ClientID: clientId, + ClientSecret: clientSecret, + Scopes: []string{"https://www.googleapis.com/auth/drive"}, + RedirectURL: "urn:ietf:wg:oauth:2.0:oob", + Endpoint: oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", + }, + } + + authUrl := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) + + tokenSource := oauthutil.TokenSource{ + Config: conf, + CacheFile: cacheFile, + AuthCode: authFn(authUrl), + } + + return oauth2.NewClient(oauth2.NoContext, tokenSource) +} diff --git a/client/client.go b/client/client.go new file mode 100644 index 00000000..1b48bbb4 --- /dev/null +++ b/client/client.go @@ -0,0 +1,28 @@ +package client + +import ( + "net/http" + "google.golang.org/api/drive/v3" +) + +type Client struct { + service *drive.Service + http *http.Client +} + +func (self *Client) Service() *drive.Service { + return self.service +} + +func (self *Client) Http() *http.Client { + return self.http +} + +func NewClient(client *http.Client) (*Client, error) { + service, err := drive.New(client) + if err != nil { + return nil, err + } + + return &Client{service, client}, nil +} diff --git a/drive.go b/drive.go index 67dab79f..2466087c 100644 --- a/drive.go +++ b/drive.go @@ -1,4 +1,4 @@ -package main +package foo import ( "fmt" diff --git a/drive/files.go b/drive/files.go new file mode 100644 index 00000000..ed3c3497 --- /dev/null +++ b/drive/files.go @@ -0,0 +1,68 @@ +package drive + +import ( + "fmt" + "io" + "os" +) + +func (self *Drive) List(args ListFilesArgs) { + fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).Fields("nextPageToken", "files(id,name,size,createdTime)").Do() + if err != nil { + exitF("Failed listing files: %s\n", err.Error()) + } + + for _, f := range fileList.Files { + fmt.Printf("%s %s %d %s\n", f.Id, f.Name, f.Size, f.CreatedTime) + } +} + + +func (self *Drive) Download(args DownloadFileArgs) { + getFile := self.service.Files.Get(args.Id) + + f, err := getFile.Do() + if err != nil { + exitF("Failed to get file: %s", err.Error()) + } + + res, err := getFile.Download() + if err != nil { + exitF("Failed to download file: %s", err.Error()) + } + + // Close body on function exit + defer res.Body.Close() + + if args.Stdout { + // Write file content to stdout + io.Copy(os.Stdout, res.Body) + return + } + + // Check if file exists + if !args.Force && fileExists(f.Name) { + exitF("File '%s' already exists, use --force to overwrite", f.Name) + } 
+ + // Create new file + outFile, err := os.Create(f.Name) + if err != nil { + exitF("Unable to create new file: %s", err.Error()) + } + + // Close file on function exit + defer outFile.Close() + + // Save file to disk + bytes, err := io.Copy(outFile, res.Body) + if err != nil { + exitF("Failed saving file: %s", err.Error()) + } + + fmt.Printf("Downloaded '%s' at %s, total %d\n", f.Name, "x/s", bytes) + + //if deleteSourceFile { + // self.Delete(args.Id) + //} +} diff --git a/drive/types.go b/drive/types.go new file mode 100644 index 00000000..d70dfb5b --- /dev/null +++ b/drive/types.go @@ -0,0 +1,37 @@ +package drive + +import ( + "net/http" + "google.golang.org/api/drive/v3" +) + +type Client interface { + Service() *drive.Service + Http() *http.Client +} + +type Drive struct { + service *drive.Service + http *http.Client +} + +func NewDrive(client Client) *Drive { + return &Drive{ + service: client.Service(), + http: client.Http(), + } +} + +type ListFilesArgs struct { + MaxFiles int64 + Query string + SkipHeader bool + SizeInBytes bool +} + +type DownloadFileArgs struct { + Id string + Force bool + NoProgress bool + Stdout bool +} diff --git a/drive/util.go b/drive/util.go new file mode 100644 index 00000000..db48d28c --- /dev/null +++ b/drive/util.go @@ -0,0 +1,20 @@ +package drive + +import ( + "fmt" + "os" +) + +func exitF(format string, a ...interface{}) { + fmt.Fprintf(os.Stderr, format, a...) + fmt.Println("") + os.Exit(1) +} + +func fileExists(path string) bool { + _, err := os.Stat(path) + if err == nil { + return true + } + return false +} diff --git a/gdrive.go b/gdrive.go new file mode 100644 index 00000000..13c8a812 --- /dev/null +++ b/gdrive.go @@ -0,0 +1,270 @@ +package main + +import ( + "fmt" + "os" + "./cli" +) + +const Name = "gdrive" +const Version = "2.0.0" + +const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com" +const ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO" + +const DefaultMaxFiles = 100 +const DefaultChunkSize = 4194304 + +var DefaultConfigDir = GetDefaultConfigDir() +var DefaultTokenFilePath = GetDefaultTokenFilePath() + + +func main() { + globalFlags := []cli.Flag{ + cli.StringFlag{ + Name: "configDir", + Patterns: []string{"-c", "--config"}, + Description: fmt.Sprintf("Application path, default: %s", DefaultConfigDir), + DefaultValue: DefaultConfigDir, + }, + } + + handlers := []*cli.Handler{ + &cli.Handler{ + Pattern: "[global options] list [options]", + Description: "List files", + Callback: listHandler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.IntFlag{ + Name: "maxFiles", + Patterns: []string{"-m", "--max"}, + Description: fmt.Sprintf("Max files to list, default: %d", DefaultMaxFiles), + DefaultValue: DefaultMaxFiles, + }, + cli.StringFlag{ + Name: "query", + Patterns: []string{"-q", "--query"}, + Description: "Query, see https://developers.google.com/drive/search-parameters", + }, + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--noheader"}, + Description: "Dont print the header", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Size in bytes", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global options] download [options] ", + Description: "Download file or directory", + Callback: downloadHandler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "force", + Patterns: []string{"-f", "--force"}, + Description: "Overwrite 
existing file", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--noprogress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "stdout", + Patterns: []string{"--stdout"}, + Description: "Write file content to stdout", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global options] upload [options] ", + Description: "Upload file or directory", + Callback: handler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "recursive", + Patterns: []string{"-r", "--recursive"}, + Description: "Upload directory recursively", + OmitValue: true, + }, + cli.StringFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id, used to upload file to a specific directory", + }, + cli.StringFlag{ + Name: "name", + Patterns: []string{"--name"}, + Description: "Filename", + }, + cli.BoolFlag{ + Name: "progress", + Patterns: []string{"--progress"}, + Description: "Show progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "stdin", + Patterns: []string{"--stdin"}, + Description: "Use stdin as file content", + OmitValue: true, + }, + cli.StringFlag{ + Name: "mime", + Patterns: []string{"--mime"}, + Description: "Force mime type", + }, + cli.BoolFlag{ + Name: "share", + Patterns: []string{"--share"}, + Description: "Share file", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "share", + Patterns: []string{"--convert"}, + Description: "Convert file to google docs format", + OmitValue: true, + }, + cli.IntFlag{ + Name: "chunksize", + Patterns: []string{"--chunksize"}, + Description: fmt.Sprintf("Set chunk size in bytes. Minimum is 262144, default is %d", DefaultChunkSize), + DefaultValue: DefaultChunkSize, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global options] info [options] ", + Description: "Show file info", + Callback: handler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Show size in bytes", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global options] mkdir [options] ", + Description: "Create directory", + Callback: handler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.StringFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id of created directory", + }, + cli.BoolFlag{ + Name: "share", + Patterns: []string{"--share"}, + Description: "Share created directory", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global options] share ", + Description: "Share file or directory", + Callback: handler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "revoke", + Patterns: []string{"--revoke"}, + Description: "Unshare file or directory", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global options] url [options] ", + Description: "Get url to file or directory", + Callback: handler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "download", + Patterns: []string{"--download"}, + Description: "Download url", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global options] delete ", + Description: "Delete file or directory", + Callback: deleteHandler, + Flags: cli.Flags{ + "global options": globalFlags, + }, + }, + &cli.Handler{ + Pattern: "[global options] quota 
[options]", + Description: "Show free space", + Callback: handler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Show size in bytes", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "version", + Description: "Print application version", + Callback: printVersion, + }, + &cli.Handler{ + Pattern: "help", + Description: "Print help", + Callback: printHelp, + }, + &cli.Handler{ + Pattern: "help ", + Description: "Print subcommand help", + Callback: printCommandHelp, + }, + } + + cli.SetHandlers(handlers) + + if ok := cli.Handle(os.Args[1:]); !ok { + ExitF("No valid arguments given, use '%s help' to see available commands", Name) + } +} diff --git a/gdrive/handlers.go b/gdrive/handlers.go new file mode 100644 index 00000000..e69de29b diff --git a/handlers.go b/handlers.go new file mode 100644 index 00000000..69aad94a --- /dev/null +++ b/handlers.go @@ -0,0 +1,100 @@ +package main + +import ( + "fmt" + "strings" + "./cli" + "./client" + "./drive" +) + +func listHandler(ctx cli.Context) { + args := ctx.Args() + gdrive := newDrive() + + gdrive.List(drive.ListFilesArgs{ + MaxFiles: args.Int64("maxFiles"), + Query: args.String("query"), + SkipHeader: args.Bool("skipHeader"), + SizeInBytes: args.Bool("sizeInBytes"), + }) +} + +func downloadHandler(ctx cli.Context) { + args := ctx.Args() + gdrive := newDrive() + + gdrive.Download(drive.DownloadFileArgs{ + Id: args.String("id"), + Force: args.Bool("force"), + Stdout: args.Bool("stdout"), + NoProgress: args.Bool("noprogress"), + }) +} + +func deleteHandler(ctx cli.Context) { + fmt.Println("Deleting...") +} + +func handler(ctx cli.Context) { + fmt.Println("handler...") +} + +func printVersion(ctx cli.Context) { + fmt.Printf("%s v%s\n", Name, Version) +} + +func printHelp(ctx cli.Context) { + fmt.Printf("%s usage:\n\n", Name) + + for _, h := range ctx.Handlers() { + fmt.Printf("%s %s (%s)\n", Name, h.Pattern, h.Description) + } +} + +func printCommandHelp(ctx cli.Context) { + handlers := ctx.FilterHandlers(ctx.Args().String("subcommand")) + + if len(handlers) == 0 { + ExitF("Subcommand not found") + } + + if len(handlers) > 1 { + ExitF("More than one matching subcommand, be more specific") + } + + handler := handlers[0] + + fmt.Printf("%s %s (%s)\n", Name, handler.Pattern, handler.Description) + for name, flags := range handler.Flags { + fmt.Printf("\n%s:\n", name) + for _, flag := range flags { + fmt.Printf(" %s (%s)\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription()) + } + } +} + +func newDrive() *drive.Drive { + oauth := client.NewOauthClient(ClientId, ClientSecret, DefaultTokenFilePath, authCodePrompt) + client, err := client.NewClient(oauth) + if err != nil { + ExitF("Failed getting drive: %s", err.Error()) + } + + return drive.NewDrive(client) +} + +func authCodePrompt(url string) func() string { + return func() string { + fmt.Println("Authentication needed") + fmt.Println("Go to the following url in your browser:") + fmt.Printf("%s\n\n", url) + fmt.Print("Enter verification code: ") + + var code string + if _, err := fmt.Scan(&code); err != nil { + fmt.Printf("Failed reading code: %s", err.Error()) + } + return code + } +} diff --git a/util.go b/util.go new file mode 100644 index 00000000..064ce3fd --- /dev/null +++ b/util.go @@ -0,0 +1,29 @@ +package main + +import ( + "runtime" + "path/filepath" + "fmt" + "os" +) + +func GetDefaultConfigDir() string { + return filepath.Join(Homedir(), ".gdrive") 
+} + +func GetDefaultTokenFilePath() string { + return filepath.Join(GetDefaultConfigDir(), "token.json") +} + +func Homedir() string { + if runtime.GOOS == "windows" { + return os.Getenv("APPDATA") + } + return os.Getenv("HOME") +} + +func ExitF(format string, a ...interface{}) { + fmt.Fprintf(os.Stderr, format, a...) + fmt.Println("") + os.Exit(1) +} From 784dd2128197236b5b0dfb08e8f0ca41ca723e87 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 16 Jan 2016 16:51:40 +0100 Subject: [PATCH 002/195] FileSource TokenSource --- client/auth.go | 23 +++++++++++++------- client/token.go | 57 +++++++++++++++++++++++++++++++++++++++++++++++++ client/util.go | 22 +++++++++++++++++++ handlers.go | 6 +++++- 4 files changed, 99 insertions(+), 9 deletions(-) create mode 100644 client/token.go create mode 100644 client/util.go diff --git a/client/auth.go b/client/auth.go index fb6852dc..136ace0c 100644 --- a/client/auth.go +++ b/client/auth.go @@ -3,12 +3,11 @@ package client import ( "net/http" "golang.org/x/oauth2" - "go4.org/oauthutil" ) type authCodeFn func(string) func() string -func NewOauthClient(clientId, clientSecret, cacheFile string, authFn authCodeFn) *http.Client { +func NewOauthClient(clientId, clientSecret, tokenFile string, authFn authCodeFn) (*http.Client, error) { conf := &oauth2.Config{ ClientID: clientId, ClientSecret: clientSecret, @@ -20,13 +19,21 @@ func NewOauthClient(clientId, clientSecret, cacheFile string, authFn authCodeFn) }, } - authUrl := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) + // Read cached token + token, exists, err := ReadToken(tokenFile) + if err != nil { + return nil, err + } - tokenSource := oauthutil.TokenSource{ - Config: conf, - CacheFile: cacheFile, - AuthCode: authFn(authUrl), + // Request auth code if token does not exist + if !exists { + authUrl := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) + authCode := authFn(authUrl)() + token, err = conf.Exchange(oauth2.NoContext, authCode) } - return oauth2.NewClient(oauth2.NoContext, tokenSource) + return oauth2.NewClient( + oauth2.NoContext, + FileSource(tokenFile, token, conf), + ), nil } diff --git a/client/token.go b/client/token.go new file mode 100644 index 00000000..09312844 --- /dev/null +++ b/client/token.go @@ -0,0 +1,57 @@ +package client + +import ( + "golang.org/x/oauth2" + "encoding/json" + "io/ioutil" +) + + +func FileSource(path string, token *oauth2.Token, conf *oauth2.Config) oauth2.TokenSource { + return &fileSource{ + tokenPath: path, + tokenSource: conf.TokenSource(oauth2.NoContext, token), + } +} + +type fileSource struct { + tokenPath string + tokenSource oauth2.TokenSource +} + +func (self *fileSource) Token() (*oauth2.Token, error) { + token, err := self.tokenSource.Token() + if err != nil { + return token, err + } + + // Save token to file + SaveToken(self.tokenPath, token) + + return token, nil +} + +func ReadToken(path string) (*oauth2.Token, bool, error) { + if !fileExists(path) { + return nil, false, nil + } + + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, true, err + } + token := &oauth2.Token{} + return token, true, json.Unmarshal(content, token) +} + +func SaveToken(path string, token *oauth2.Token) error { + data, err := json.MarshalIndent(token, "", " ") + if err != nil { + return err + } + + if err = mkdir(path); err != nil { + return err + } + return ioutil.WriteFile(path, data, 0600) +} diff --git a/client/util.go b/client/util.go new file mode 100644 index 00000000..b600fd6f --- /dev/null +++ b/client/util.go @@ -0,0 +1,22 @@ 
+package client + +import ( + "os" + "path/filepath" +) + +func mkdir(path string) error { + dir := filepath.Dir(path) + if fileExists(dir) { + return nil + } + return os.Mkdir(dir, 0700) +} + +func fileExists(path string) bool { + _, err := os.Stat(path) + if err == nil { + return true + } + return false +} diff --git a/handlers.go b/handlers.go index 69aad94a..a4bb3e2e 100644 --- a/handlers.go +++ b/handlers.go @@ -75,7 +75,11 @@ func printCommandHelp(ctx cli.Context) { } func newDrive() *drive.Drive { - oauth := client.NewOauthClient(ClientId, ClientSecret, DefaultTokenFilePath, authCodePrompt) + oauth, err := client.NewOauthClient(ClientId, ClientSecret, DefaultTokenFilePath, authCodePrompt) + if err != nil { + ExitF("Failed getting oauth client: %s", err.Error()) + } + client, err := client.NewClient(oauth) if err != nil { ExitF("Failed getting drive: %s", err.Error()) From f2ab00ce73744c95a986100784ec4af92cd83c33 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 16 Jan 2016 18:24:31 +0100 Subject: [PATCH 003/195] Check for missing refresh token --- client/auth.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/client/auth.go b/client/auth.go index 136ace0c..9ea1ecea 100644 --- a/client/auth.go +++ b/client/auth.go @@ -25,8 +25,9 @@ func NewOauthClient(clientId, clientSecret, tokenFile string, authFn authCodeFn) return nil, err } - // Request auth code if token does not exist - if !exists { + // Require auth code if token file does not exist + // or refresh token is missing + if !exists || token.RefreshToken == "" { authUrl := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) authCode := authFn(authUrl)() token, err = conf.Exchange(oauth2.NoContext, authCode) From 50b0e69a163cd625b257d9d49dbbb880b83eeeca Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 16 Jan 2016 18:27:55 +0100 Subject: [PATCH 004/195] v2 --- util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util.go b/util.go index 064ce3fd..f5cec715 100644 --- a/util.go +++ b/util.go @@ -12,7 +12,7 @@ func GetDefaultConfigDir() string { } func GetDefaultTokenFilePath() string { - return filepath.Join(GetDefaultConfigDir(), "token.json") + return filepath.Join(GetDefaultConfigDir(), "token_v2.json") } func Homedir() string { From 53f76875dc0cc926a05dc43c5fa2a5f81b65ad36 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 16 Jan 2016 19:58:48 +0100 Subject: [PATCH 005/195] Basic upload support --- drive/files.go | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++ drive/types.go | 10 +++++++++ gdrive.go | 25 ++++++---------------- handlers.go | 16 +++++++++++++++ 4 files changed, 88 insertions(+), 19 deletions(-) diff --git a/drive/files.go b/drive/files.go index ed3c3497..49cf0342 100644 --- a/drive/files.go +++ b/drive/files.go @@ -3,7 +3,11 @@ package drive import ( "fmt" "io" + "mime" "os" + "path/filepath" + "google.golang.org/api/drive/v3" + "golang.org/x/net/context" ) func (self *Drive) List(args ListFilesArgs) { @@ -66,3 +70,55 @@ func (self *Drive) Download(args DownloadFileArgs) { // self.Delete(args.Id) //} } + +func (self *Drive) Upload(args UploadFileArgs) { + //if args.Stdin { + // self.uploadStdin() + //} + + srcFile, err := os.Open(args.Path) + if err != nil { + exitF("Failed to open file: %s", err.Error()) + } + + srcFileInfo, err := srcFile.Stat() + if err != nil { + exitF("Failed to read file metadata: %s", err.Error()) + } + + // Instantiate empty drive file + dstFile := &drive.File{} + + // Use provided file name or use filename + if 
args.Name == "" { + dstFile.Name = filepath.Base(srcFileInfo.Name()) + } else { + dstFile.Name = args.Name + } + + // Set provided mime type or get type based on file extension + if args.Mime == "" { + dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) + } else { + dstFile.MimeType = args.Mime + } + + // Set parent folder if provided + if args.Parent != "" { + dstFile.Parents = []string{args.Parent} + } + + f, err := self.service.Files.Create(dstFile).ResumableMedia(context.Background(), srcFile, srcFileInfo.Size(), dstFile.MimeType).Do() + if err != nil { + exitF("Failed to upload file: %s", err.Error()) + } + + fmt.Printf("Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) + //if args.Share { + // self.Share(TODO) + //} +} + +//func newFile(args UploadFileArgs) *drive.File { +// +//} diff --git a/drive/types.go b/drive/types.go index d70dfb5b..6c1bd147 100644 --- a/drive/types.go +++ b/drive/types.go @@ -35,3 +35,13 @@ type DownloadFileArgs struct { NoProgress bool Stdout bool } + +type UploadFileArgs struct { + Path string + Name string + Parent string + Mime string + Recursive bool + Stdin bool + Share bool +} diff --git a/gdrive.go b/gdrive.go index 13c8a812..cf1c6caa 100644 --- a/gdrive.go +++ b/gdrive.go @@ -13,7 +13,6 @@ const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleu const ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO" const DefaultMaxFiles = 100 -const DefaultChunkSize = 4194304 var DefaultConfigDir = GetDefaultConfigDir() var DefaultTokenFilePath = GetDefaultTokenFilePath() @@ -50,7 +49,7 @@ func main() { }, cli.BoolFlag{ Name: "skipHeader", - Patterns: []string{"--noheader"}, + Patterns: []string{"--no-header"}, Description: "Dont print the header", OmitValue: true, }, @@ -78,7 +77,7 @@ func main() { }, cli.BoolFlag{ Name: "noProgress", - Patterns: []string{"--noprogress"}, + Patterns: []string{"--no-progress"}, Description: "Hide progress", OmitValue: true, }, @@ -94,7 +93,7 @@ func main() { &cli.Handler{ Pattern: "[global options] upload [options] ", Description: "Upload file or directory", - Callback: handler, + Callback: uploadHandler, Flags: cli.Flags{ "global options": globalFlags, "options": []cli.Flag{ @@ -115,9 +114,9 @@ func main() { Description: "Filename", }, cli.BoolFlag{ - Name: "progress", - Patterns: []string{"--progress"}, - Description: "Show progress", + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", OmitValue: true, }, cli.BoolFlag{ @@ -137,18 +136,6 @@ func main() { Description: "Share file", OmitValue: true, }, - cli.BoolFlag{ - Name: "share", - Patterns: []string{"--convert"}, - Description: "Convert file to google docs format", - OmitValue: true, - }, - cli.IntFlag{ - Name: "chunksize", - Patterns: []string{"--chunksize"}, - Description: fmt.Sprintf("Set chunk size in bytes. 
Minimum is 262144, default is %d", DefaultChunkSize), - DefaultValue: DefaultChunkSize, - }, }, }, }, diff --git a/handlers.go b/handlers.go index a4bb3e2e..426f089f 100644 --- a/handlers.go +++ b/handlers.go @@ -32,6 +32,21 @@ func downloadHandler(ctx cli.Context) { }) } +func uploadHandler(ctx cli.Context) { + args := ctx.Args() + gdrive := newDrive() + + gdrive.Upload(drive.UploadFileArgs{ + Path: args.String("path"), + Name: args.String("name"), + Parent: args.String("parent"), + Mime: args.String("mime"), + Recursive: args.Bool("recursive"), + Stdin: args.Bool("stdin"), + Share: args.Bool("share"), + }) +} + func deleteHandler(ctx cli.Context) { fmt.Println("Deleting...") } @@ -74,6 +89,7 @@ func printCommandHelp(ctx cli.Context) { } } +// TODO: take app path as arg func newDrive() *drive.Drive { oauth, err := client.NewOauthClient(ClientId, ClientSecret, DefaultTokenFilePath, authCodePrompt) if err != nil { From 6e25407d7c1dde508f144802a2147d7c5abd6322 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 16 Jan 2016 20:07:15 +0100 Subject: [PATCH 006/195] errorF --- drive/files.go | 32 ++++++++------------------------ drive/util.go | 10 ++++++++++ 2 files changed, 18 insertions(+), 24 deletions(-) diff --git a/drive/files.go b/drive/files.go index 49cf0342..138202a5 100644 --- a/drive/files.go +++ b/drive/files.go @@ -12,9 +12,7 @@ import ( func (self *Drive) List(args ListFilesArgs) { fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).Fields("nextPageToken", "files(id,name,size,createdTime)").Do() - if err != nil { - exitF("Failed listing files: %s\n", err.Error()) - } + errorF(err, "Failed listing files: %s\n", err) for _, f := range fileList.Files { fmt.Printf("%s %s %d %s\n", f.Id, f.Name, f.Size, f.CreatedTime) @@ -26,14 +24,10 @@ func (self *Drive) Download(args DownloadFileArgs) { getFile := self.service.Files.Get(args.Id) f, err := getFile.Do() - if err != nil { - exitF("Failed to get file: %s", err.Error()) - } + errorF(err, "Failed to get file: %s", err) res, err := getFile.Download() - if err != nil { - exitF("Failed to download file: %s", err.Error()) - } + errorF(err, "Failed to download file: %s", err) // Close body on function exit defer res.Body.Close() @@ -51,18 +45,14 @@ func (self *Drive) Download(args DownloadFileArgs) { // Create new file outFile, err := os.Create(f.Name) - if err != nil { - exitF("Unable to create new file: %s", err.Error()) - } + errorF(err, "Unable to create new file: %s", err) // Close file on function exit defer outFile.Close() // Save file to disk bytes, err := io.Copy(outFile, res.Body) - if err != nil { - exitF("Failed saving file: %s", err.Error()) - } + errorF(err, "Failed saving file: %s", err) fmt.Printf("Downloaded '%s' at %s, total %d\n", f.Name, "x/s", bytes) @@ -77,14 +67,10 @@ func (self *Drive) Upload(args UploadFileArgs) { //} srcFile, err := os.Open(args.Path) - if err != nil { - exitF("Failed to open file: %s", err.Error()) - } + errorF(err, "Failed to open file: %s", err) srcFileInfo, err := srcFile.Stat() - if err != nil { - exitF("Failed to read file metadata: %s", err.Error()) - } + errorF(err, "Failed to read file metadata: %s", err) // Instantiate empty drive file dstFile := &drive.File{} @@ -109,9 +95,7 @@ func (self *Drive) Upload(args UploadFileArgs) { } f, err := self.service.Files.Create(dstFile).ResumableMedia(context.Background(), srcFile, srcFileInfo.Size(), dstFile.MimeType).Do() - if err != nil { - exitF("Failed to upload file: %s", err.Error()) - } + errorF(err, "Failed to 
upload file: %s", err) fmt.Printf("Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) //if args.Share { diff --git a/drive/util.go b/drive/util.go index db48d28c..8dda213b 100644 --- a/drive/util.go +++ b/drive/util.go @@ -5,6 +5,16 @@ import ( "os" ) +func errorF(err error, format string, a ...interface{}) { + if err == nil { + return + } + + fmt.Fprintf(os.Stderr, format, a...) + fmt.Println("") + os.Exit(1) +} + func exitF(format string, a ...interface{}) { fmt.Fprintf(os.Stderr, format, a...) fmt.Println("") From 9eeb5957fb489f5b8ace9b55f653ae92b75c9acb Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 16 Jan 2016 23:19:16 +0100 Subject: [PATCH 007/195] Implemented file info --- drive/files.go | 10 ++++++++ drive/print.go | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++ drive/types.go | 20 +++++++++++++++ gdrive.go | 2 +- handlers.go | 10 ++++++++ 5 files changed, 110 insertions(+), 1 deletion(-) create mode 100644 drive/print.go diff --git a/drive/files.go b/drive/files.go index 138202a5..f1a4b97e 100644 --- a/drive/files.go +++ b/drive/files.go @@ -103,6 +103,16 @@ func (self *Drive) Upload(args UploadFileArgs) { //} } +func (self *Drive) Info(args FileInfoArgs) { + f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description").Do() + errorF(err, "Failed to get file: %s", err) + + PrintFileInfo(PrintFileInfoArgs{ + File: f, + SizeInBytes: args.SizeInBytes, + }) +} + //func newFile(args UploadFileArgs) *drive.File { // //} diff --git a/drive/print.go b/drive/print.go new file mode 100644 index 00000000..5473690f --- /dev/null +++ b/drive/print.go @@ -0,0 +1,69 @@ +package drive + +import ( + "fmt" + "strings" + "strconv" + "time" +) + + +func PrintFileInfo(args PrintFileInfoArgs) { + f := args.File + + items := []kv{ + kv{"Id", f.Id}, + kv{"Name", f.Name}, + kv{"Description", f.Description}, + kv{"Mime", f.MimeType}, + kv{"Size", formatSize(f.Size, args.SizeInBytes)}, + kv{"Created", formatDatetime(f.CreatedTime)}, + kv{"Modified", formatDatetime(f.ModifiedTime)}, + kv{"Md5sum", f.Md5Checksum}, + kv{"Shared", formatBool(f.Shared)}, + kv{"Parents", formatList(f.Parents)}, + } + + for _, item := range items { + if item.value() != "" { + fmt.Printf("%s: %s\n", item.key(), item.value()) + } + } +} + + +func formatList(a []string) string { + return strings.Join(a, ", ") +} + +func formatSize(bytes int64, forceBytes bool) string { + if forceBytes { + return fmt.Sprintf("%v B", bytes) + } + + units := []string{"B", "KB", "MB", "GB", "TB", "PB"} + + var i int + value := float64(bytes) + + for value > 1000 { + value /= 1000 + i++ + } + return fmt.Sprintf("%.1f %s", value, units[i]) +} + +func formatBool(b bool) string { + return strings.Title(strconv.FormatBool(b)) +} + +func formatDatetime(iso string) string { + t, err := time.Parse(time.RFC3339, iso) + if err != nil { + return iso + } + local := t.Local() + year, month, day := local.Date() + hour, min, sec := local.Clock() + return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec) +} diff --git a/drive/types.go b/drive/types.go index 6c1bd147..ecf92a11 100644 --- a/drive/types.go +++ b/drive/types.go @@ -45,3 +45,23 @@ type UploadFileArgs struct { Stdin bool Share bool } + +type FileInfoArgs struct { + Id string + SizeInBytes bool +} + +type PrintFileInfoArgs struct { + File *drive.File + SizeInBytes bool +} + +type kv [2]string + +func (self kv) key() string { + return self[0] +} + +func 
(self kv) value() string { + return self[1] +} diff --git a/gdrive.go b/gdrive.go index cf1c6caa..e623aa88 100644 --- a/gdrive.go +++ b/gdrive.go @@ -142,7 +142,7 @@ func main() { &cli.Handler{ Pattern: "[global options] info [options] ", Description: "Show file info", - Callback: handler, + Callback: infoHandler, Flags: cli.Flags{ "global options": globalFlags, "options": []cli.Flag{ diff --git a/handlers.go b/handlers.go index 426f089f..f681a2d3 100644 --- a/handlers.go +++ b/handlers.go @@ -47,6 +47,16 @@ func uploadHandler(ctx cli.Context) { }) } +func infoHandler(ctx cli.Context) { + args := ctx.Args() + gdrive := newDrive() + + gdrive.Info(drive.FileInfoArgs{ + Id: args.String("id"), + SizeInBytes: args.Bool("sizeInBytes"), + }) +} + func deleteHandler(ctx cli.Context) { fmt.Println("Deleting...") } From 204037d92075c40ed6b0db127b0617241557411e Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 16 Jan 2016 23:57:24 +0100 Subject: [PATCH 008/195] Format list output --- drive/files.go | 8 +++--- drive/print.go | 68 +++++++++++++++++++++++++++++++++++++++++++++++++- drive/types.go | 6 +++++ 3 files changed, 78 insertions(+), 4 deletions(-) diff --git a/drive/files.go b/drive/files.go index f1a4b97e..b10f2f6e 100644 --- a/drive/files.go +++ b/drive/files.go @@ -14,9 +14,11 @@ func (self *Drive) List(args ListFilesArgs) { fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).Fields("nextPageToken", "files(id,name,size,createdTime)").Do() errorF(err, "Failed listing files: %s\n", err) - for _, f := range fileList.Files { - fmt.Printf("%s %s %d %s\n", f.Id, f.Name, f.Size, f.CreatedTime) - } + PrintFileList(PrintFileListArgs{ + Files: fileList.Files, + SkipHeader: args.SkipHeader, + SizeInBytes: args.SizeInBytes, + }) } diff --git a/drive/print.go b/drive/print.go index 5473690f..85d51b4d 100644 --- a/drive/print.go +++ b/drive/print.go @@ -1,13 +1,36 @@ package drive import ( + "os" "fmt" + "text/tabwriter" "strings" "strconv" + "unicode/utf8" "time" ) +func PrintFileList(args PrintFileListArgs) { + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tSize\tCreated") + } + + for _, f := range args.Files { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", + f.Id, + truncateString(f.Name, 40), + formatSize(f.Size, args.SizeInBytes), + formatDatetime(f.CreatedTime), + ) + } + + w.Flush() +} + func PrintFileInfo(args PrintFileInfoArgs) { f := args.File @@ -31,7 +54,6 @@ func PrintFileInfo(args PrintFileInfoArgs) { } } - func formatList(a []string) string { return strings.Join(a, ", ") } @@ -67,3 +89,47 @@ func formatDatetime(iso string) string { hour, min, sec := local.Clock() return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec) } + +// Truncates string to given max length, and inserts ellipsis into +// the middle of the string to signify that the string has been truncated +func truncateString(str string, maxRunes int) string { + indicator := "..." 
+ + // Number of runes in string + runeCount := utf8.RuneCountInString(str) + + // Return input string if length of input string is less than max length + // Input string is also returned if max length is less than 9 which is the minmal supported length + if runeCount <= maxRunes || maxRunes < 9 { + return str + } + + // Number of remaining runes to be removed + remaining := (runeCount - maxRunes) + utf8.RuneCountInString(indicator) + + var truncated string + var skip bool + + for leftOffset, char := range str { + rightOffset := runeCount - (leftOffset + remaining) + + // Start skipping chars when the left and right offsets are equal + // Or in the case where we wont be able to do an even split: when the left offset is larger than the right offset + if leftOffset == rightOffset || (leftOffset > rightOffset && !skip) { + skip = true + truncated += indicator + } + + if skip && remaining > 0 { + // Skip char and decrement the remaining skip counter + remaining-- + continue + } + + // Add char to result string + truncated += string(char) + } + + // Return truncated string + return truncated +} diff --git a/drive/types.go b/drive/types.go index ecf92a11..296902cc 100644 --- a/drive/types.go +++ b/drive/types.go @@ -51,6 +51,12 @@ type FileInfoArgs struct { SizeInBytes bool } +type PrintFileListArgs struct { + Files []*drive.File + SkipHeader bool + SizeInBytes bool +} + type PrintFileInfoArgs struct { File *drive.File SizeInBytes bool From d8496025102c4586002a76ce3840ac71a40b8a5c Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 01:33:48 +0100 Subject: [PATCH 009/195] Add default query --- gdrive.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gdrive.go b/gdrive.go index e623aa88..152cf1c9 100644 --- a/gdrive.go +++ b/gdrive.go @@ -13,6 +13,7 @@ const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleu const ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO" const DefaultMaxFiles = 100 +const DefaultQuery = "trashed = false and 'me' in owners" var DefaultConfigDir = GetDefaultConfigDir() var DefaultTokenFilePath = GetDefaultTokenFilePath() @@ -45,7 +46,8 @@ func main() { cli.StringFlag{ Name: "query", Patterns: []string{"-q", "--query"}, - Description: "Query, see https://developers.google.com/drive/search-parameters", + Description: fmt.Sprintf(`Default query: "%s". 
See https://developers.google.com/drive/search-parameters`, DefaultQuery), + DefaultValue: DefaultQuery, }, cli.BoolFlag{ Name: "skipHeader", From 8f86e06cceb244c82912f2876fc9f1f2d7239162 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 01:35:06 +0100 Subject: [PATCH 010/195] DefaultMaxFiles 30 --- gdrive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gdrive.go b/gdrive.go index 152cf1c9..c065566a 100644 --- a/gdrive.go +++ b/gdrive.go @@ -12,7 +12,7 @@ const Version = "2.0.0" const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com" const ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO" -const DefaultMaxFiles = 100 +const DefaultMaxFiles = 30 const DefaultQuery = "trashed = false and 'me' in owners" var DefaultConfigDir = GetDefaultConfigDir() From 2726ba76fe0f6b1e1f3ea9a45465e9d0ec3a5758 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 11:03:32 +0100 Subject: [PATCH 011/195] Add name-width flag --- drive/files.go | 1 + drive/print.go | 2 +- drive/types.go | 2 ++ gdrive.go | 7 +++++++ handlers.go | 1 + 5 files changed, 12 insertions(+), 1 deletion(-) diff --git a/drive/files.go b/drive/files.go index b10f2f6e..6d662348 100644 --- a/drive/files.go +++ b/drive/files.go @@ -16,6 +16,7 @@ func (self *Drive) List(args ListFilesArgs) { PrintFileList(PrintFileListArgs{ Files: fileList.Files, + NameWidth: int(args.NameWidth), SkipHeader: args.SkipHeader, SizeInBytes: args.SizeInBytes, }) diff --git a/drive/print.go b/drive/print.go index 85d51b4d..eaabae32 100644 --- a/drive/print.go +++ b/drive/print.go @@ -22,7 +22,7 @@ func PrintFileList(args PrintFileListArgs) { for _, f := range args.Files { fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", f.Id, - truncateString(f.Name, 40), + truncateString(f.Name, args.NameWidth), formatSize(f.Size, args.SizeInBytes), formatDatetime(f.CreatedTime), ) diff --git a/drive/types.go b/drive/types.go index 296902cc..d3f09693 100644 --- a/drive/types.go +++ b/drive/types.go @@ -24,6 +24,7 @@ func NewDrive(client Client) *Drive { type ListFilesArgs struct { MaxFiles int64 + NameWidth int64 Query string SkipHeader bool SizeInBytes bool @@ -53,6 +54,7 @@ type FileInfoArgs struct { type PrintFileListArgs struct { Files []*drive.File + NameWidth int SkipHeader bool SizeInBytes bool } diff --git a/gdrive.go b/gdrive.go index c065566a..f5f7edb5 100644 --- a/gdrive.go +++ b/gdrive.go @@ -13,6 +13,7 @@ const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleu const ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO" const DefaultMaxFiles = 30 +const DefaultNameWidth = 40 const DefaultQuery = "trashed = false and 'me' in owners" var DefaultConfigDir = GetDefaultConfigDir() @@ -49,6 +50,12 @@ func main() { Description: fmt.Sprintf(`Default query: "%s". 
See https://developers.google.com/drive/search-parameters`, DefaultQuery), DefaultValue: DefaultQuery, }, + cli.IntFlag{ + Name: "nameWidth", + Patterns: []string{"--name-width"}, + Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), + DefaultValue: DefaultNameWidth, + }, cli.BoolFlag{ Name: "skipHeader", Patterns: []string{"--no-header"}, diff --git a/handlers.go b/handlers.go index f681a2d3..59607dad 100644 --- a/handlers.go +++ b/handlers.go @@ -14,6 +14,7 @@ func listHandler(ctx cli.Context) { gdrive.List(drive.ListFilesArgs{ MaxFiles: args.Int64("maxFiles"), + NameWidth: args.Int64("nameWidth"), Query: args.String("query"), SkipHeader: args.Bool("skipHeader"), SizeInBytes: args.Bool("sizeInBytes"), From 9e3c856daf642e914891b64d43ba3e579093a8dc Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 11:21:47 +0100 Subject: [PATCH 012/195] Respect configDir flag --- gdrive.go | 5 ----- handlers.go | 20 +++++++++++++------- util.go | 4 ++-- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/gdrive.go b/gdrive.go index f5f7edb5..a4a50496 100644 --- a/gdrive.go +++ b/gdrive.go @@ -9,15 +9,10 @@ import ( const Name = "gdrive" const Version = "2.0.0" -const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com" -const ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO" - const DefaultMaxFiles = 30 const DefaultNameWidth = 40 const DefaultQuery = "trashed = false and 'me' in owners" - var DefaultConfigDir = GetDefaultConfigDir() -var DefaultTokenFilePath = GetDefaultTokenFilePath() func main() { diff --git a/handlers.go b/handlers.go index 59607dad..b09159f7 100644 --- a/handlers.go +++ b/handlers.go @@ -8,9 +8,14 @@ import ( "./drive" ) +const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com" +const ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO" +const TokenFilename = "token_v2.json" + + func listHandler(ctx cli.Context) { args := ctx.Args() - gdrive := newDrive() + gdrive := newDrive(args) gdrive.List(drive.ListFilesArgs{ MaxFiles: args.Int64("maxFiles"), @@ -23,7 +28,7 @@ func listHandler(ctx cli.Context) { func downloadHandler(ctx cli.Context) { args := ctx.Args() - gdrive := newDrive() + gdrive := newDrive(args) gdrive.Download(drive.DownloadFileArgs{ Id: args.String("id"), @@ -35,7 +40,7 @@ func downloadHandler(ctx cli.Context) { func uploadHandler(ctx cli.Context) { args := ctx.Args() - gdrive := newDrive() + gdrive := newDrive(args) gdrive.Upload(drive.UploadFileArgs{ Path: args.String("path"), @@ -50,7 +55,7 @@ func uploadHandler(ctx cli.Context) { func infoHandler(ctx cli.Context) { args := ctx.Args() - gdrive := newDrive() + gdrive := newDrive(args) gdrive.Info(drive.FileInfoArgs{ Id: args.String("id"), @@ -100,9 +105,10 @@ func printCommandHelp(ctx cli.Context) { } } -// TODO: take app path as arg -func newDrive() *drive.Drive { - oauth, err := client.NewOauthClient(ClientId, ClientSecret, DefaultTokenFilePath, authCodePrompt) +func newDrive(args cli.Arguments) *drive.Drive { + configDir := args.String("configDir") + tokenPath := ConfigFilePath(configDir, TokenFilename) + oauth, err := client.NewOauthClient(ClientId, ClientSecret, tokenPath, authCodePrompt) if err != nil { ExitF("Failed getting oauth client: %s", err.Error()) } diff --git a/util.go b/util.go index f5cec715..7f43bd2a 100644 --- a/util.go +++ b/util.go @@ -11,8 +11,8 @@ func GetDefaultConfigDir() string { return filepath.Join(Homedir(), ".gdrive") } -func 
GetDefaultTokenFilePath() string { - return filepath.Join(GetDefaultConfigDir(), "token_v2.json") +func ConfigFilePath(basePath, name string) string { + return filepath.Join(basePath, name) } func Homedir() string { From 9e19c833c2495c52fe1af547a2eecb2693136a9c Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 11:56:55 +0100 Subject: [PATCH 013/195] mkdir implementation --- .gitignore | 1 + drive/files.go | 21 +++++++++++++++++++++ drive/print.go | 4 ++++ drive/types.go | 6 ++++++ gdrive.go | 2 +- handlers.go | 22 ++++++++++++++-------- 6 files changed, 47 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 36d220a7..b0c6c0e7 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ _release/bin Session.vim .netrwhist drive_old +foo.txt diff --git a/drive/files.go b/drive/files.go index 6d662348..8a5c7f4a 100644 --- a/drive/files.go +++ b/drive/files.go @@ -10,6 +10,8 @@ import ( "golang.org/x/net/context" ) +const DirectoryMimeType = "application/vnd.google-apps.folder" + func (self *Drive) List(args ListFilesArgs) { fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).Fields("nextPageToken", "files(id,name,size,createdTime)").Do() errorF(err, "Failed listing files: %s\n", err) @@ -116,6 +118,25 @@ func (self *Drive) Info(args FileInfoArgs) { }) } +func (self *Drive) Mkdir(args MkdirArgs) { + dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType} + + // Set parent folder if provided + if args.Parent != "" { + dstFile.Parents = []string{args.Parent} + } + + // Create folder + f, err := self.service.Files.Create(dstFile).Do() + errorF(err, "Failed to create folder: %s", err) + + PrintFileInfo(PrintFileInfoArgs{File: f}) + + //if args.Share { + // self.Share(TODO) + //} +} + //func newFile(args UploadFileArgs) *drive.File { // //} diff --git a/drive/print.go b/drive/print.go index eaabae32..84d4ec76 100644 --- a/drive/print.go +++ b/drive/print.go @@ -59,6 +59,10 @@ func formatList(a []string) string { } func formatSize(bytes int64, forceBytes bool) string { + if bytes == 0 { + return "" + } + if forceBytes { return fmt.Sprintf("%v B", bytes) } diff --git a/drive/types.go b/drive/types.go index d3f09693..6fa2f3a2 100644 --- a/drive/types.go +++ b/drive/types.go @@ -52,6 +52,12 @@ type FileInfoArgs struct { SizeInBytes bool } +type MkdirArgs struct { + Name string + Parent string + Share bool +} + type PrintFileListArgs struct { Files []*drive.File NameWidth int diff --git a/gdrive.go b/gdrive.go index a4a50496..2bef468e 100644 --- a/gdrive.go +++ b/gdrive.go @@ -162,7 +162,7 @@ func main() { &cli.Handler{ Pattern: "[global options] mkdir [options] ", Description: "Create directory", - Callback: handler, + Callback: mkdirHandler, Flags: cli.Flags{ "global options": globalFlags, "options": []cli.Flag{ diff --git a/handlers.go b/handlers.go index b09159f7..64e4fcdf 100644 --- a/handlers.go +++ b/handlers.go @@ -15,9 +15,8 @@ const TokenFilename = "token_v2.json" func listHandler(ctx cli.Context) { args := ctx.Args() - gdrive := newDrive(args) - gdrive.List(drive.ListFilesArgs{ + newDrive(args).List(drive.ListFilesArgs{ MaxFiles: args.Int64("maxFiles"), NameWidth: args.Int64("nameWidth"), Query: args.String("query"), @@ -28,9 +27,8 @@ func listHandler(ctx cli.Context) { func downloadHandler(ctx cli.Context) { args := ctx.Args() - gdrive := newDrive(args) - gdrive.Download(drive.DownloadFileArgs{ + newDrive(args).Download(drive.DownloadFileArgs{ Id: args.String("id"), Force: args.Bool("force"), Stdout: 
args.Bool("stdout"), @@ -40,9 +38,8 @@ func downloadHandler(ctx cli.Context) { func uploadHandler(ctx cli.Context) { args := ctx.Args() - gdrive := newDrive(args) - gdrive.Upload(drive.UploadFileArgs{ + newDrive(args).Upload(drive.UploadFileArgs{ Path: args.String("path"), Name: args.String("name"), Parent: args.String("parent"), @@ -55,14 +52,23 @@ func uploadHandler(ctx cli.Context) { func infoHandler(ctx cli.Context) { args := ctx.Args() - gdrive := newDrive(args) - gdrive.Info(drive.FileInfoArgs{ + newDrive(args).Info(drive.FileInfoArgs{ Id: args.String("id"), SizeInBytes: args.Bool("sizeInBytes"), }) } +func mkdirHandler(ctx cli.Context) { + args := ctx.Args() + + newDrive(args).Mkdir(drive.MkdirArgs{ + Name: args.String("name"), + Parent: args.String("parent"), + Share: args.Bool("share"), + }) +} + func deleteHandler(ctx cli.Context) { fmt.Println("Deleting...") } From 0f9535df1b2a64e8f895671025bf7823e9391135 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 13:33:19 +0100 Subject: [PATCH 014/195] Implement share file --- drive/files.go | 5 +---- drive/permissions.go | 47 ++++++++++++++++++++++++++++++++++++++++++++ drive/types.go | 9 +++++++++ gdrive.go | 31 ++++++++++++++++++++++++++--- handlers.go | 13 ++++++++++++ 5 files changed, 98 insertions(+), 7 deletions(-) create mode 100644 drive/permissions.go diff --git a/drive/files.go b/drive/files.go index 8a5c7f4a..b6eed04f 100644 --- a/drive/files.go +++ b/drive/files.go @@ -12,6 +12,7 @@ import ( const DirectoryMimeType = "application/vnd.google-apps.folder" + func (self *Drive) List(args ListFilesArgs) { fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).Fields("nextPageToken", "files(id,name,size,createdTime)").Do() errorF(err, "Failed listing files: %s\n", err) @@ -136,7 +137,3 @@ func (self *Drive) Mkdir(args MkdirArgs) { // self.Share(TODO) //} } - -//func newFile(args UploadFileArgs) *drive.File { -// -//} diff --git a/drive/permissions.go b/drive/permissions.go new file mode 100644 index 00000000..1e2c004a --- /dev/null +++ b/drive/permissions.go @@ -0,0 +1,47 @@ +package drive + +import ( + "fmt" + "google.golang.org/api/drive/v3" +) + + +func (self *Drive) Share(args ShareArgs) { + if args.Revoke { + err := self.deletePermissions(args) + errorF(err, "Failed delete permissions: %s", err) + } + + permission := &drive.Permission{ + AllowFileDiscovery: args.Discoverable, + Role: args.Role, + Type: args.Type, + EmailAddress: args.Email, + } + + p, err := self.service.Permissions.Create(args.FileId, permission).Do() + errorF(err, "Failed share file: %s", err) + + fmt.Println(p) +} + +func (self *Drive) deletePermissions(args ShareArgs) error { + permList, err := self.service.Permissions.List(args.FileId).Do() + if err != nil { + return err + } + + for _, p := range permList.Permissions { + // Skip owner permissions + if p.Role == "owner" { + continue + } + + err := self.service.Permissions.Delete(args.FileId, p.Id).Do() + if err != nil { + return err + } + } + + return nil +} diff --git a/drive/types.go b/drive/types.go index 6fa2f3a2..f252ddef 100644 --- a/drive/types.go +++ b/drive/types.go @@ -58,6 +58,15 @@ type MkdirArgs struct { Share bool } +type ShareArgs struct { + FileId string + Role string + Type string + Email string + Discoverable bool + Revoke bool +} + type PrintFileListArgs struct { Files []*drive.File NameWidth int diff --git a/gdrive.go b/gdrive.go index 2bef468e..3f9ec90b 100644 --- a/gdrive.go +++ b/gdrive.go @@ -12,6 +12,8 @@ const Version = "2.0.0" const 
DefaultMaxFiles = 30 const DefaultNameWidth = 40 const DefaultQuery = "trashed = false and 'me' in owners" +const DefaultShareRole = "reader" +const DefaultShareType = "anyone" var DefaultConfigDir = GetDefaultConfigDir() @@ -181,16 +183,39 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] share ", + Pattern: "[global options] share [options] ", Description: "Share file or directory", - Callback: handler, + Callback: shareHandler, Flags: cli.Flags{ "global options": globalFlags, "options": []cli.Flag{ + cli.BoolFlag{ + Name: "discoverable", + Patterns: []string{"--discoverable"}, + Description: "Make file discoverable by search engines", + OmitValue: true, + }, + cli.StringFlag{ + Name: "role", + Patterns: []string{"--role"}, + Description: fmt.Sprintf("Share role. Default: %s", DefaultShareRole), + DefaultValue: DefaultShareRole, + }, + cli.StringFlag{ + Name: "type", + Patterns: []string{"--type"}, + Description: fmt.Sprintf("Share type. Default: %s", DefaultShareType), + DefaultValue: DefaultShareType, + }, + cli.StringFlag{ + Name: "email", + Patterns: []string{"--email"}, + Description: "The email address of the user or group to share the file with. Requires 'user' or 'group' as type", + }, cli.BoolFlag{ Name: "revoke", Patterns: []string{"--revoke"}, - Description: "Unshare file or directory", + Description: "Delete all sharing permissions", OmitValue: true, }, }, diff --git a/handlers.go b/handlers.go index 64e4fcdf..3d90dc7e 100644 --- a/handlers.go +++ b/handlers.go @@ -69,6 +69,19 @@ func mkdirHandler(ctx cli.Context) { }) } +func shareHandler(ctx cli.Context) { + args := ctx.Args() + + newDrive(args).Share(drive.ShareArgs{ + FileId: args.String("id"), + Role: args.String("role"), + Type: args.String("type"), + Email: args.String("email"), + Discoverable: args.Bool("discoverable"), + Revoke: args.Bool("revoke"), + }) +} + func deleteHandler(ctx cli.Context) { fmt.Println("Deleting...") } From f16b89b6f6bee6023c51b4f8120a3e4776128384 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 13:41:40 +0100 Subject: [PATCH 015/195] Delete stale files --- auth/auth.go | 73 ---------------- config/config.go | 74 ---------------- drive.go | 170 ------------------------------------ gdrive/gdrive.go | 53 ----------- gdrive/handlers.go | 0 util/drive.go | 88 ------------------- util/generic.go | 213 --------------------------------------------- 7 files changed, 671 deletions(-) delete mode 100644 auth/auth.go delete mode 100644 config/config.go delete mode 100644 drive.go delete mode 100644 gdrive/gdrive.go delete mode 100644 gdrive/handlers.go delete mode 100644 util/drive.go delete mode 100644 util/generic.go diff --git a/auth/auth.go b/auth/auth.go deleted file mode 100644 index 6bb14d63..00000000 --- a/auth/auth.go +++ /dev/null @@ -1,73 +0,0 @@ -package auth - -import ( - "code.google.com/p/goauth2/oauth" - "errors" - "fmt" - "github.com/prasmussen/gdrive/util" - "net/http" -) - -// Get auth code from user -func promptUserForAuthCode(config *oauth.Config) string { - authUrl := config.AuthCodeURL("state") - fmt.Println("Go to the following link in your browser:") - fmt.Printf("%v\n\n", authUrl) - return util.Prompt("Enter verification code: ") -} - -// Returns true if we have a valid cached token -func hasValidToken(cacheFile oauth.CacheFile, transport *oauth.Transport) bool { - // Check if we have a cached token - token, err := cacheFile.Token() - if err != nil { - return false - } - - // Refresh token if its expired - if token.Expired() { - 
transport.Token = token - err = transport.Refresh() - if err != nil { - fmt.Println(err) - return false - } - } - return true -} - -func GetOauth2Client(clientId, clientSecret, cachePath string, promptUser bool) (*http.Client, error) { - cacheFile := oauth.CacheFile(cachePath) - - config := &oauth.Config{ - ClientId: clientId, - ClientSecret: clientSecret, - Scope: "https://www.googleapis.com/auth/drive", - RedirectURL: "urn:ietf:wg:oauth:2.0:oob", - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", - TokenCache: cacheFile, - } - - transport := &oauth.Transport{ - Config: config, - Transport: http.DefaultTransport, - } - - // Return client if we have a valid token - if hasValidToken(cacheFile, transport) { - return transport.Client(), nil - } - - if !promptUser { - return nil, errors.New("no valid token found") - } - - // Get auth code from user and request a new token - code := promptUserForAuthCode(config) - _, err := transport.Exchange(code) - if err != nil { - return nil, err - } - return transport.Client(), nil -} diff --git a/config/config.go b/config/config.go deleted file mode 100644 index 76d22853..00000000 --- a/config/config.go +++ /dev/null @@ -1,74 +0,0 @@ -package config - -import ( - "encoding/json" - "fmt" - "github.com/prasmussen/gdrive/util" - "io/ioutil" -) - -// Client ID and secrect for installed applications -const ( - ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com" - ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO" -) - -type Config struct { - ClientId string - ClientSecret string -} - -func defaultConfig() *Config { - return &Config{ - ClientId: ClientId, - ClientSecret: ClientSecret, - } -} - -func promptUser() *Config { - return &Config{ - ClientId: util.Prompt("Enter Client Id: "), - ClientSecret: util.Prompt("Enter Client Secret: "), - } -} - -func load(fname string) (*Config, error) { - data, err := ioutil.ReadFile(fname) - if err != nil { - return nil, err - } - config := &Config{} - return config, json.Unmarshal(data, config) -} - -func save(fname string, config *Config) error { - data, err := json.MarshalIndent(config, "", " ") - if err != nil { - return err - } - - if err = util.Mkdir(fname); err != nil { - return err - } - return ioutil.WriteFile(fname, data, 0600) -} - -func Load(fname string, advancedUser bool) *Config { - config, err := load(fname) - if err != nil { - // Unable to read existing config, lets start from scracth - // Get config from user input for advanced users, or just use default settings - if advancedUser { - config = promptUser() - } else { - config = defaultConfig() - } - - // Save config to file - err := save(fname, config) - if err != nil { - fmt.Printf("Failed to save config (%s)\n", err) - } - } - return config -} diff --git a/drive.go b/drive.go deleted file mode 100644 index 2466087c..00000000 --- a/drive.go +++ /dev/null @@ -1,170 +0,0 @@ -package foo - -import ( - "fmt" - "github.com/prasmussen/gdrive/cli" - "github.com/prasmussen/gdrive/gdrive" - "github.com/prasmussen/gdrive/util" - "github.com/prasmussen/google-api-go-client/googleapi" - "github.com/voxelbrain/goptions" - "os" -) - -const ( - VersionNumber = "1.9.0" -) - -type Options struct { - Advanced bool `goptions:"-a, --advanced, description='Advanced Mode -- lets you specify your own oauth client id and secret on setup'"` - AppPath string `goptions:"-c, --config, description='Set application path where config and token is stored. 
Defaults to ~/.gdrive'"` - Version bool `goptions:"-v, --version, description='Print version'"` - goptions.Help `goptions:"-h, --help, description='Show this help'"` - - goptions.Verbs - - List struct { - MaxResults int `goptions:"-m, --max, description='Max results'"` - IncludeDocs bool `goptions:"--include-docs, description='Include google docs in listing'"` - TitleFilter string `goptions:"-t, --title, mutexgroup='query', description='Title filter'"` - Query string `goptions:"-q, --query, mutexgroup='query', description='Query (see https://developers.google.com/drive/search-parameters)'"` - SharedStatus bool `goptions:"-s, --shared, description='Show shared status (Note: this will generate 1 http req per file)'"` - NoHeader bool `goptions:"-n, --noheader, description='Do not show the header'"` - SizeInBytes bool `goptions:"--bytes, description='Show size in bytes'"` - } `goptions:"list"` - - Info struct { - FileId string `goptions:"-i, --id, obligatory, description='File Id'"` - SizeInBytes bool `goptions:"--bytes, description='Show size in bytes'"` - } `goptions:"info"` - - Folder struct { - Title string `goptions:"-t, --title, obligatory, description='Folder to create'"` - ParentId string `goptions:"-p, --parent, description='Parent Id of the folder'"` - Share bool `goptions:"--share, description='Share created folder'"` - } `goptions:"folder"` - - Upload struct { - File *os.File `goptions:"-f, --file, mutexgroup='input', obligatory, rdonly, description='File or directory to upload'"` - Stdin bool `goptions:"-s, --stdin, mutexgroup='input', obligatory, description='Use stdin as file content'"` - Title string `goptions:"-t, --title, description='Title to give uploaded file. Defaults to filename'"` - ParentId string `goptions:"-p, --parent, description='Parent Id of the file'"` - Share bool `goptions:"--share, description='Share uploaded file'"` - MimeType string `goptions:"--mimetype, description='The MIME type (default will try to figure it out)'"` - Convert bool `goptions:"--convert, description='File will be converted to Google Docs format'"` - ChunkSize int64 `goptions:"-C, --chunksize, description='Set chunk size in bytes. Minimum is 262144, default is 4194304. 
Recommended to be a power of two.'"` - } `goptions:"upload"` - - Download struct { - FileId string `goptions:"-i, --id, mutexgroup='download', obligatory, description='File Id'"` - Format string `goptions:"--format, description='Download file in a specified format (needed for google docs)'"` - Stdout bool `goptions:"-s, --stdout, description='Write file content to stdout'"` - Force bool `goptions:"--force, description='Overwrite existing file'"` - Pop bool `goptions:"--pop, mutexgroup='download', description='Download latest file, and remove it from google drive'"` - } `goptions:"download"` - - Delete struct { - FileId string `goptions:"-i, --id, obligatory, description='File Id'"` - } `goptions:"delete"` - - Share struct { - FileId string `goptions:"-i, --id, obligatory, description='File Id'"` - } `goptions:"share"` - - Unshare struct { - FileId string `goptions:"-i, --id, obligatory, description='File Id'"` - } `goptions:"unshare"` - - Url struct { - FileId string `goptions:"-i, --id, obligatory, description='File Id'"` - Preview bool `goptions:"-p, --preview, mutexgroup='urltype', description='Generate preview url (default)'"` - Download bool `goptions:"-d, --download, mutexgroup='urltype', description='Generate download url'"` - } `goptions:"url"` - - Quota struct { - SizeInBytes bool `goptions:"--bytes, description='Show size in bytes'"` - } `goptions:"quota"` -} - -func main() { - opts := &Options{} - goptions.ParseAndFail(opts) - - // Print version number and exit if the version flag is set - if opts.Version { - fmt.Printf("gdrive v%s\n", VersionNumber) - return - } - - // Get authorized drive client - drive, err := gdrive.New(opts.AppPath, opts.Advanced, true) - if err != nil { - writeError("An error occurred creating Drive client: %v\n", err) - } - - switch opts.Verbs { - case "list": - args := opts.List - err = cli.List(drive, args.Query, args.TitleFilter, args.MaxResults, args.SharedStatus, args.NoHeader, args.IncludeDocs, args.SizeInBytes) - - case "info": - err = cli.Info(drive, opts.Info.FileId, opts.Info.SizeInBytes) - - case "folder": - args := opts.Folder - err = cli.Folder(drive, args.Title, args.ParentId, args.Share) - - case "upload": - args := opts.Upload - - // Set custom chunksize if given - if args.ChunkSize >= (1 << 18) { - googleapi.SetChunkSize(args.ChunkSize) - } - - if args.Stdin { - err = cli.UploadStdin(drive, os.Stdin, args.Title, args.ParentId, args.Share, args.MimeType, args.Convert) - } else { - err = cli.Upload(drive, args.File, args.Title, args.ParentId, args.Share, args.MimeType, args.Convert) - } - - case "download": - args := opts.Download - if args.Pop { - err = cli.DownloadLatest(drive, args.Stdout, args.Format, args.Force) - } else { - err = cli.Download(drive, args.FileId, args.Stdout, false, args.Format, args.Force) - } - - case "delete": - err = cli.Delete(drive, opts.Delete.FileId) - - case "share": - err = cli.Share(drive, opts.Share.FileId) - - case "unshare": - err = cli.Unshare(drive, opts.Unshare.FileId) - - case "url": - if opts.Url.Download { - fmt.Println(util.DownloadUrl(opts.Url.FileId)) - } else { - fmt.Println(util.PreviewUrl(opts.Url.FileId)) - } - - case "quota": - err = cli.Quota(drive, opts.Quota.SizeInBytes) - - default: - goptions.PrintHelp() - } - - if err != nil { - writeError("%s", err) - } -} - -func writeError(format string, err error) { - fmt.Fprintf(os.Stderr, format, err) - fmt.Print("\n") - os.Exit(1) -} diff --git a/gdrive/gdrive.go b/gdrive/gdrive.go deleted file mode 100644 index 1f8f4c7f..00000000 --- 
a/gdrive/gdrive.go +++ /dev/null @@ -1,53 +0,0 @@ -package gdrive - -import ( - "github.com/prasmussen/google-api-go-client/drive/v2" - "github.com/prasmussen/gdrive/auth" - "github.com/prasmussen/gdrive/config" - "github.com/prasmussen/gdrive/util" - "net/http" - "path/filepath" -) - -// File paths and names -var ( - AppPath = filepath.Join(util.Homedir(), ".gdrive") - ConfigFname = "config.json" - TokenFname = "token.json" - //ConfigPath = filepath.Join(ConfigDir, "config.json") - //TokenPath = filepath.Join(ConfigDir, "token.json") -) - -type Drive struct { - *drive.Service - client *http.Client -} - -// Returns the raw http client which has the oauth transport -func (self *Drive) Client() *http.Client { - return self.client -} - -func New(customAppPath string, advancedMode bool, promptUser bool) (*Drive, error) { - if customAppPath != "" { - AppPath = customAppPath - } - - // Build paths to config files - configPath := filepath.Join(AppPath, ConfigFname) - tokenPath := filepath.Join(AppPath, TokenFname) - - config := config.Load(configPath, advancedMode) - client, err := auth.GetOauth2Client(config.ClientId, config.ClientSecret, tokenPath, promptUser) - if err != nil { - return nil, err - } - - drive, err := drive.New(client) - if err != nil { - return nil, err - } - - // Return a new authorized Drive client. - return &Drive{drive, client}, nil -} diff --git a/gdrive/handlers.go b/gdrive/handlers.go deleted file mode 100644 index e69de29b..00000000 diff --git a/util/drive.go b/util/drive.go deleted file mode 100644 index a8e77773..00000000 --- a/util/drive.go +++ /dev/null @@ -1,88 +0,0 @@ -package util - -import ( - "fmt" - "github.com/prasmussen/google-api-go-client/drive/v2" - "strings" -) - -func PreviewUrl(id string) string { - //return fmt.Sprintf("https://drive.google.com/uc?id=%s&export=preview", id) - return fmt.Sprintf("https://drive.google.com/uc?id=%s", id) -} - -// Note to self: file.WebContentLink = https://docs.google.com/uc?id=&export=download -func DownloadUrl(id string) string { - return fmt.Sprintf("https://drive.google.com/uc?id=%s&export=download", id) -} - -func ParentList(parents []*drive.ParentReference) string { - ids := make([]string, 0) - for _, parent := range parents { - ids = append(ids, parent.Id) - } - - return strings.Join(ids, ", ") -} - -func InternalDownloadUrlAndExtension(info *drive.File, format string) (downloadUrl string, extension string, err error) { - // Make a list of available mime types for this file - availableMimeTypes := make([]string, 0) - for mime, _ := range info.ExportLinks { - availableMimeTypes = append(availableMimeTypes, mime) - } - - mimeExtensions := map[string]string{ - "application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx", - "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx", - "application/application/vnd.openxmlformats-officedocument.presentationml.presentation": "pptx", - "application/vnd.oasis.opendocument.text": "odf", - "application/x-vnd.oasis.opendocument.spreadsheet": "ods", - "application/pdf": "pdf", - "application/rtf": "rtf", - "text/csv": "csv", - "text/html": "html", - "text/plain": "txt", - "application/vnd.google-apps.script+json": "json", - } - - // Make a list of available formats for this file - availableFormats := make([]string, 0) - for _, mime := range availableMimeTypes { - if ext, ok := mimeExtensions[mime]; ok { - availableFormats = append(availableFormats, ext) - } - } - - // Return DownloadUrl if no format is specified - if format == "" { - if 
info.DownloadUrl == "" { - if len(availableFormats) > 0 { - return "", "", fmt.Errorf("A format needs to be specified to download this file (--format). Available formats: %s", strings.Join(availableFormats, ", ")) - } else { - return "", "", fmt.Errorf("Download is not supported for this filetype") - } - } - return info.DownloadUrl, "", nil - } - - // Ensure that the specified format is available - if !inArray(format, availableFormats) { - if len(availableFormats) > 0 { - return "", "", fmt.Errorf("Invalid format. Available formats: %s", strings.Join(availableFormats, ", ")) - } else { - return "", "", fmt.Errorf("No export formats are available for this file") - } - } - - // Grab download url - for mime, f := range mimeExtensions { - if f == format { - downloadUrl = info.ExportLinks[mime] - break - } - } - - extension = "." + format - return -} diff --git a/util/generic.go b/util/generic.go deleted file mode 100644 index ded985d2..00000000 --- a/util/generic.go +++ /dev/null @@ -1,213 +0,0 @@ -package util - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Prompt user to input data -func Prompt(msg string) string { - fmt.Printf(msg) - var str string - fmt.Scanln(&str) - return str -} - -// Returns true if file/directory exists -func FileExists(path string) bool { - _, err := os.Stat(path) - if err == nil { - return true - } - return false -} - -func Mkdir(path string) error { - dir := filepath.Dir(path) - if FileExists(dir) { - return nil - } - return os.Mkdir(dir, 0700) -} - -// Returns the users home dir -func Homedir() string { - if runtime.GOOS == "windows" { - return os.Getenv("APPDATA") - } - return os.Getenv("HOME") -} - -func FormatBool(b bool) string { - return strings.Title(strconv.FormatBool(b)) -} - -func FileSizeFormat(bytes int64, forceBytes bool) string { - if forceBytes { - return fmt.Sprintf("%v B", bytes) - } - - units := []string{"B", "KB", "MB", "GB", "TB", "PB"} - - var i int - value := float64(bytes) - - for value > 1000 { - value /= 1000 - i++ - } - return fmt.Sprintf("%.1f %s", value, units[i]) -} - -// Truncates string to given max length, and inserts ellipsis into -// the middle of the string to signify that the string has been truncated -func TruncateString(str string, maxRunes int) string { - indicator := "..." 
- - // Number of runes in string - runeCount := utf8.RuneCountInString(str) - - // Return input string if length of input string is less than max length - // Input string is also returned if max length is less than 9 which is the minmal supported length - if runeCount <= maxRunes || maxRunes < 9 { - return str - } - - // Number of remaining runes to be removed - remaining := (runeCount - maxRunes) + utf8.RuneCountInString(indicator) - - var truncated string - var skip bool - - for leftOffset, char := range str { - rightOffset := runeCount - (leftOffset + remaining) - - // Start skipping chars when the left and right offsets are equal - // Or in the case where we wont be able to do an even split: when the left offset is larger than the right offset - if leftOffset == rightOffset || (leftOffset > rightOffset && !skip) { - skip = true - truncated += indicator - } - - if skip && remaining > 0 { - // Skip char and decrement the remaining skip counter - remaining-- - continue - } - - // Add char to result string - truncated += string(char) - } - - // Return truncated string - return truncated -} - -func ISODateToLocal(iso string) string { - t, err := time.Parse(time.RFC3339, iso) - if err != nil { - return iso - } - local := t.Local() - year, month, day := local.Date() - hour, min, sec := local.Clock() - return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec) -} - -func MeasureTransferRate() func(int64) string { - start := time.Now() - - return func(bytes int64) string { - seconds := int64(time.Now().Sub(start).Seconds()) - if seconds < 1 { - return fmt.Sprintf("%s/s", FileSizeFormat(bytes, false)) - } - bps := bytes / seconds - return fmt.Sprintf("%s/s", FileSizeFormat(bps, false)) - } -} - -// Prints a map in the provided order with one key-value-pair per line -func Print(m map[string]string, keyOrder []string) { - for _, key := range keyOrder { - value, ok := m[key] - if ok && value != "" { - fmt.Printf("%s: %s\n", key, value) - } - } -} - -// Prints items in columns with header and correct padding -func PrintColumns(items []map[string]string, keyOrder []string, columnSpacing int, noHeader bool) { - - if !noHeader { - // Create header - header := make(map[string]string) - for _, key := range keyOrder { - header[key] = key - } - - // Add header as the first element of items - items = append([]map[string]string{header}, items...) 
- } - - // Get a padding function for each column - padFns := make(map[string]func(string) string) - for _, key := range keyOrder { - padFns[key] = columnPadder(items, key, columnSpacing) - } - - // Loop, pad and print items - for _, item := range items { - var line string - - // Add each column to line with correct padding - for _, key := range keyOrder { - value, _ := item[key] - line += padFns[key](value) - } - - // Print line - fmt.Println(line) - } -} - -// Returns a padding function, that pads input to the longest string in items -func columnPadder(items []map[string]string, key string, spacing int) func(string) string { - // Holds length of longest string - var max int - - // Find the longest string of type key in the array - for _, item := range items { - str := item[key] - length := utf8.RuneCountInString(str) - if length > max { - max = length - } - } - - // Return padding function - return func(str string) string { - column := str - for utf8.RuneCountInString(column) < max+spacing { - column += " " - } - return column - } -} - -func inArray(needle string, haystack []string) bool { - for _, x := range haystack { - if needle == x { - return true - } - } - - return false -} From f35fd0892688ff638b30dcd48fdd56b9e2627cf1 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 21:13:46 +0100 Subject: [PATCH 016/195] One file per command --- drive/download.go | 55 ++++++++++++ drive/drive.go | 23 +++++ drive/files.go | 139 ----------------------------- drive/info.go | 49 ++++++++++ drive/list.go | 55 ++++++++++++ drive/mkdir.go | 32 +++++++ drive/print.go | 139 ----------------------------- drive/{permissions.go => share.go} | 8 ++ drive/types.go | 90 ------------------- drive/upload.go | 62 +++++++++++++ drive/util.go | 100 ++++++++++++++++++++- 11 files changed, 383 insertions(+), 369 deletions(-) create mode 100644 drive/download.go create mode 100644 drive/drive.go delete mode 100644 drive/files.go create mode 100644 drive/info.go create mode 100644 drive/list.go create mode 100644 drive/mkdir.go delete mode 100644 drive/print.go rename drive/{permissions.go => share.go} (88%) delete mode 100644 drive/types.go create mode 100644 drive/upload.go diff --git a/drive/download.go b/drive/download.go new file mode 100644 index 00000000..9a35912c --- /dev/null +++ b/drive/download.go @@ -0,0 +1,55 @@ +package drive + +import ( + "fmt" + "io" + "os" +) + +type DownloadFileArgs struct { + Id string + Force bool + NoProgress bool + Stdout bool +} + +func (self *Drive) Download(args DownloadFileArgs) { + getFile := self.service.Files.Get(args.Id) + + f, err := getFile.Do() + errorF(err, "Failed to get file: %s", err) + + res, err := getFile.Download() + errorF(err, "Failed to download file: %s", err) + + // Close body on function exit + defer res.Body.Close() + + if args.Stdout { + // Write file content to stdout + io.Copy(os.Stdout, res.Body) + return + } + + // Check if file exists + if !args.Force && fileExists(f.Name) { + exitF("File '%s' already exists, use --force to overwrite", f.Name) + } + + // Create new file + outFile, err := os.Create(f.Name) + errorF(err, "Unable to create new file: %s", err) + + // Close file on function exit + defer outFile.Close() + + // Save file to disk + bytes, err := io.Copy(outFile, res.Body) + errorF(err, "Failed saving file: %s", err) + + fmt.Printf("Downloaded '%s' at %s, total %d\n", f.Name, "x/s", bytes) + + //if deleteSourceFile { + // self.Delete(args.Id) + //} +} diff --git a/drive/drive.go b/drive/drive.go new file mode 100644 
index 00000000..047030a9 --- /dev/null +++ b/drive/drive.go @@ -0,0 +1,23 @@ +package drive + +import ( + "net/http" + "google.golang.org/api/drive/v3" +) + +type Client interface { + Service() *drive.Service + Http() *http.Client +} + +type Drive struct { + service *drive.Service + http *http.Client +} + +func NewDrive(client Client) *Drive { + return &Drive{ + service: client.Service(), + http: client.Http(), + } +} diff --git a/drive/files.go b/drive/files.go deleted file mode 100644 index b6eed04f..00000000 --- a/drive/files.go +++ /dev/null @@ -1,139 +0,0 @@ -package drive - -import ( - "fmt" - "io" - "mime" - "os" - "path/filepath" - "google.golang.org/api/drive/v3" - "golang.org/x/net/context" -) - -const DirectoryMimeType = "application/vnd.google-apps.folder" - - -func (self *Drive) List(args ListFilesArgs) { - fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).Fields("nextPageToken", "files(id,name,size,createdTime)").Do() - errorF(err, "Failed listing files: %s\n", err) - - PrintFileList(PrintFileListArgs{ - Files: fileList.Files, - NameWidth: int(args.NameWidth), - SkipHeader: args.SkipHeader, - SizeInBytes: args.SizeInBytes, - }) -} - - -func (self *Drive) Download(args DownloadFileArgs) { - getFile := self.service.Files.Get(args.Id) - - f, err := getFile.Do() - errorF(err, "Failed to get file: %s", err) - - res, err := getFile.Download() - errorF(err, "Failed to download file: %s", err) - - // Close body on function exit - defer res.Body.Close() - - if args.Stdout { - // Write file content to stdout - io.Copy(os.Stdout, res.Body) - return - } - - // Check if file exists - if !args.Force && fileExists(f.Name) { - exitF("File '%s' already exists, use --force to overwrite", f.Name) - } - - // Create new file - outFile, err := os.Create(f.Name) - errorF(err, "Unable to create new file: %s", err) - - // Close file on function exit - defer outFile.Close() - - // Save file to disk - bytes, err := io.Copy(outFile, res.Body) - errorF(err, "Failed saving file: %s", err) - - fmt.Printf("Downloaded '%s' at %s, total %d\n", f.Name, "x/s", bytes) - - //if deleteSourceFile { - // self.Delete(args.Id) - //} -} - -func (self *Drive) Upload(args UploadFileArgs) { - //if args.Stdin { - // self.uploadStdin() - //} - - srcFile, err := os.Open(args.Path) - errorF(err, "Failed to open file: %s", err) - - srcFileInfo, err := srcFile.Stat() - errorF(err, "Failed to read file metadata: %s", err) - - // Instantiate empty drive file - dstFile := &drive.File{} - - // Use provided file name or use filename - if args.Name == "" { - dstFile.Name = filepath.Base(srcFileInfo.Name()) - } else { - dstFile.Name = args.Name - } - - // Set provided mime type or get type based on file extension - if args.Mime == "" { - dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) - } else { - dstFile.MimeType = args.Mime - } - - // Set parent folder if provided - if args.Parent != "" { - dstFile.Parents = []string{args.Parent} - } - - f, err := self.service.Files.Create(dstFile).ResumableMedia(context.Background(), srcFile, srcFileInfo.Size(), dstFile.MimeType).Do() - errorF(err, "Failed to upload file: %s", err) - - fmt.Printf("Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) - //if args.Share { - // self.Share(TODO) - //} -} - -func (self *Drive) Info(args FileInfoArgs) { - f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description").Do() - errorF(err, "Failed to get 
file: %s", err) - - PrintFileInfo(PrintFileInfoArgs{ - File: f, - SizeInBytes: args.SizeInBytes, - }) -} - -func (self *Drive) Mkdir(args MkdirArgs) { - dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType} - - // Set parent folder if provided - if args.Parent != "" { - dstFile.Parents = []string{args.Parent} - } - - // Create folder - f, err := self.service.Files.Create(dstFile).Do() - errorF(err, "Failed to create folder: %s", err) - - PrintFileInfo(PrintFileInfoArgs{File: f}) - - //if args.Share { - // self.Share(TODO) - //} -} diff --git a/drive/info.go b/drive/info.go new file mode 100644 index 00000000..f5f5602f --- /dev/null +++ b/drive/info.go @@ -0,0 +1,49 @@ +package drive + +import ( + "fmt" + "google.golang.org/api/drive/v3" +) + +type FileInfoArgs struct { + Id string + SizeInBytes bool +} + +func (self *Drive) Info(args FileInfoArgs) { + f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description").Do() + errorF(err, "Failed to get file: %s", err) + + PrintFileInfo(PrintFileInfoArgs{ + File: f, + SizeInBytes: args.SizeInBytes, + }) +} + +type PrintFileInfoArgs struct { + File *drive.File + SizeInBytes bool +} + +func PrintFileInfo(args PrintFileInfoArgs) { + f := args.File + + items := []kv{ + kv{"Id", f.Id}, + kv{"Name", f.Name}, + kv{"Description", f.Description}, + kv{"Mime", f.MimeType}, + kv{"Size", formatSize(f.Size, args.SizeInBytes)}, + kv{"Created", formatDatetime(f.CreatedTime)}, + kv{"Modified", formatDatetime(f.ModifiedTime)}, + kv{"Md5sum", f.Md5Checksum}, + kv{"Shared", formatBool(f.Shared)}, + kv{"Parents", formatList(f.Parents)}, + } + + for _, item := range items { + if item.value() != "" { + fmt.Printf("%s: %s\n", item.key(), item.value()) + } + } +} diff --git a/drive/list.go b/drive/list.go new file mode 100644 index 00000000..c5427856 --- /dev/null +++ b/drive/list.go @@ -0,0 +1,55 @@ +package drive + +import ( + "fmt" + "os" + "text/tabwriter" + "google.golang.org/api/drive/v3" +) + +type ListFilesArgs struct { + MaxFiles int64 + NameWidth int64 + Query string + SkipHeader bool + SizeInBytes bool +} + +func (self *Drive) List(args ListFilesArgs) { + fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).Fields("nextPageToken", "files(id,name,size,createdTime)").Do() + errorF(err, "Failed listing files: %s\n", err) + + PrintFileList(PrintFileListArgs{ + Files: fileList.Files, + NameWidth: int(args.NameWidth), + SkipHeader: args.SkipHeader, + SizeInBytes: args.SizeInBytes, + }) +} + +type PrintFileListArgs struct { + Files []*drive.File + NameWidth int + SkipHeader bool + SizeInBytes bool +} + +func PrintFileList(args PrintFileListArgs) { + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tSize\tCreated") + } + + for _, f := range args.Files { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", + f.Id, + truncateString(f.Name, args.NameWidth), + formatSize(f.Size, args.SizeInBytes), + formatDatetime(f.CreatedTime), + ) + } + + w.Flush() +} diff --git a/drive/mkdir.go b/drive/mkdir.go new file mode 100644 index 00000000..82956492 --- /dev/null +++ b/drive/mkdir.go @@ -0,0 +1,32 @@ +package drive + +import ( + "google.golang.org/api/drive/v3" +) + +const DirectoryMimeType = "application/vnd.google-apps.folder" + +type MkdirArgs struct { + Name string + Parent string + Share bool +} + +func (self *Drive) Mkdir(args MkdirArgs) { + dstFile := &drive.File{Name: 
args.Name, MimeType: DirectoryMimeType} + + // Set parent folder if provided + if args.Parent != "" { + dstFile.Parents = []string{args.Parent} + } + + // Create folder + f, err := self.service.Files.Create(dstFile).Do() + errorF(err, "Failed to create folder: %s", err) + + PrintFileInfo(PrintFileInfoArgs{File: f}) + + //if args.Share { + // self.Share(TODO) + //} +} diff --git a/drive/print.go b/drive/print.go deleted file mode 100644 index 84d4ec76..00000000 --- a/drive/print.go +++ /dev/null @@ -1,139 +0,0 @@ -package drive - -import ( - "os" - "fmt" - "text/tabwriter" - "strings" - "strconv" - "unicode/utf8" - "time" -) - - -func PrintFileList(args PrintFileListArgs) { - w := new(tabwriter.Writer) - w.Init(os.Stdout, 0, 0, 3, ' ', 0) - - if !args.SkipHeader { - fmt.Fprintln(w, "Id\tName\tSize\tCreated") - } - - for _, f := range args.Files { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", - f.Id, - truncateString(f.Name, args.NameWidth), - formatSize(f.Size, args.SizeInBytes), - formatDatetime(f.CreatedTime), - ) - } - - w.Flush() -} - -func PrintFileInfo(args PrintFileInfoArgs) { - f := args.File - - items := []kv{ - kv{"Id", f.Id}, - kv{"Name", f.Name}, - kv{"Description", f.Description}, - kv{"Mime", f.MimeType}, - kv{"Size", formatSize(f.Size, args.SizeInBytes)}, - kv{"Created", formatDatetime(f.CreatedTime)}, - kv{"Modified", formatDatetime(f.ModifiedTime)}, - kv{"Md5sum", f.Md5Checksum}, - kv{"Shared", formatBool(f.Shared)}, - kv{"Parents", formatList(f.Parents)}, - } - - for _, item := range items { - if item.value() != "" { - fmt.Printf("%s: %s\n", item.key(), item.value()) - } - } -} - -func formatList(a []string) string { - return strings.Join(a, ", ") -} - -func formatSize(bytes int64, forceBytes bool) string { - if bytes == 0 { - return "" - } - - if forceBytes { - return fmt.Sprintf("%v B", bytes) - } - - units := []string{"B", "KB", "MB", "GB", "TB", "PB"} - - var i int - value := float64(bytes) - - for value > 1000 { - value /= 1000 - i++ - } - return fmt.Sprintf("%.1f %s", value, units[i]) -} - -func formatBool(b bool) string { - return strings.Title(strconv.FormatBool(b)) -} - -func formatDatetime(iso string) string { - t, err := time.Parse(time.RFC3339, iso) - if err != nil { - return iso - } - local := t.Local() - year, month, day := local.Date() - hour, min, sec := local.Clock() - return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec) -} - -// Truncates string to given max length, and inserts ellipsis into -// the middle of the string to signify that the string has been truncated -func truncateString(str string, maxRunes int) string { - indicator := "..." 
- - // Number of runes in string - runeCount := utf8.RuneCountInString(str) - - // Return input string if length of input string is less than max length - // Input string is also returned if max length is less than 9 which is the minmal supported length - if runeCount <= maxRunes || maxRunes < 9 { - return str - } - - // Number of remaining runes to be removed - remaining := (runeCount - maxRunes) + utf8.RuneCountInString(indicator) - - var truncated string - var skip bool - - for leftOffset, char := range str { - rightOffset := runeCount - (leftOffset + remaining) - - // Start skipping chars when the left and right offsets are equal - // Or in the case where we wont be able to do an even split: when the left offset is larger than the right offset - if leftOffset == rightOffset || (leftOffset > rightOffset && !skip) { - skip = true - truncated += indicator - } - - if skip && remaining > 0 { - // Skip char and decrement the remaining skip counter - remaining-- - continue - } - - // Add char to result string - truncated += string(char) - } - - // Return truncated string - return truncated -} diff --git a/drive/permissions.go b/drive/share.go similarity index 88% rename from drive/permissions.go rename to drive/share.go index 1e2c004a..7e7036d9 100644 --- a/drive/permissions.go +++ b/drive/share.go @@ -5,6 +5,14 @@ import ( "google.golang.org/api/drive/v3" ) +type ShareArgs struct { + FileId string + Role string + Type string + Email string + Discoverable bool + Revoke bool +} func (self *Drive) Share(args ShareArgs) { if args.Revoke { diff --git a/drive/types.go b/drive/types.go deleted file mode 100644 index f252ddef..00000000 --- a/drive/types.go +++ /dev/null @@ -1,90 +0,0 @@ -package drive - -import ( - "net/http" - "google.golang.org/api/drive/v3" -) - -type Client interface { - Service() *drive.Service - Http() *http.Client -} - -type Drive struct { - service *drive.Service - http *http.Client -} - -func NewDrive(client Client) *Drive { - return &Drive{ - service: client.Service(), - http: client.Http(), - } -} - -type ListFilesArgs struct { - MaxFiles int64 - NameWidth int64 - Query string - SkipHeader bool - SizeInBytes bool -} - -type DownloadFileArgs struct { - Id string - Force bool - NoProgress bool - Stdout bool -} - -type UploadFileArgs struct { - Path string - Name string - Parent string - Mime string - Recursive bool - Stdin bool - Share bool -} - -type FileInfoArgs struct { - Id string - SizeInBytes bool -} - -type MkdirArgs struct { - Name string - Parent string - Share bool -} - -type ShareArgs struct { - FileId string - Role string - Type string - Email string - Discoverable bool - Revoke bool -} - -type PrintFileListArgs struct { - Files []*drive.File - NameWidth int - SkipHeader bool - SizeInBytes bool -} - -type PrintFileInfoArgs struct { - File *drive.File - SizeInBytes bool -} - -type kv [2]string - -func (self kv) key() string { - return self[0] -} - -func (self kv) value() string { - return self[1] -} diff --git a/drive/upload.go b/drive/upload.go new file mode 100644 index 00000000..ed373ef8 --- /dev/null +++ b/drive/upload.go @@ -0,0 +1,62 @@ +package drive + +import ( + "fmt" + "mime" + "os" + "path/filepath" + "google.golang.org/api/drive/v3" + "golang.org/x/net/context" +) + +type UploadFileArgs struct { + Path string + Name string + Parent string + Mime string + Recursive bool + Stdin bool + Share bool +} + +func (self *Drive) Upload(args UploadFileArgs) { + //if args.Stdin { + // self.uploadStdin() + //} + + srcFile, err := os.Open(args.Path) + errorF(err, 
"Failed to open file: %s", err) + + srcFileInfo, err := srcFile.Stat() + errorF(err, "Failed to read file metadata: %s", err) + + // Instantiate empty drive file + dstFile := &drive.File{} + + // Use provided file name or use filename + if args.Name == "" { + dstFile.Name = filepath.Base(srcFileInfo.Name()) + } else { + dstFile.Name = args.Name + } + + // Set provided mime type or get type based on file extension + if args.Mime == "" { + dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) + } else { + dstFile.MimeType = args.Mime + } + + // Set parent folder if provided + if args.Parent != "" { + dstFile.Parents = []string{args.Parent} + } + + f, err := self.service.Files.Create(dstFile).ResumableMedia(context.Background(), srcFile, srcFileInfo.Size(), dstFile.MimeType).Do() + errorF(err, "Failed to upload file: %s", err) + + fmt.Printf("Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) + //if args.Share { + // self.Share(TODO) + //} +} diff --git a/drive/util.go b/drive/util.go index 8dda213b..bc2d5c3b 100644 --- a/drive/util.go +++ b/drive/util.go @@ -1,10 +1,108 @@ package drive import ( - "fmt" "os" + "fmt" + "strings" + "strconv" + "unicode/utf8" + "time" ) +type kv [2]string + +func (self kv) key() string { + return self[0] +} + +func (self kv) value() string { + return self[1] +} + +func formatList(a []string) string { + return strings.Join(a, ", ") +} + +func formatSize(bytes int64, forceBytes bool) string { + if bytes == 0 { + return "" + } + + if forceBytes { + return fmt.Sprintf("%v B", bytes) + } + + units := []string{"B", "KB", "MB", "GB", "TB", "PB"} + + var i int + value := float64(bytes) + + for value > 1000 { + value /= 1000 + i++ + } + return fmt.Sprintf("%.1f %s", value, units[i]) +} + +func formatBool(b bool) string { + return strings.Title(strconv.FormatBool(b)) +} + +func formatDatetime(iso string) string { + t, err := time.Parse(time.RFC3339, iso) + if err != nil { + return iso + } + local := t.Local() + year, month, day := local.Date() + hour, min, sec := local.Clock() + return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec) +} + +// Truncates string to given max length, and inserts ellipsis into +// the middle of the string to signify that the string has been truncated +func truncateString(str string, maxRunes int) string { + indicator := "..." 
+ + // Number of runes in string + runeCount := utf8.RuneCountInString(str) + + // Return input string if length of input string is less than max length + // Input string is also returned if max length is less than 9 which is the minmal supported length + if runeCount <= maxRunes || maxRunes < 9 { + return str + } + + // Number of remaining runes to be removed + remaining := (runeCount - maxRunes) + utf8.RuneCountInString(indicator) + + var truncated string + var skip bool + + for leftOffset, char := range str { + rightOffset := runeCount - (leftOffset + remaining) + + // Start skipping chars when the left and right offsets are equal + // Or in the case where we wont be able to do an even split: when the left offset is larger than the right offset + if leftOffset == rightOffset || (leftOffset > rightOffset && !skip) { + skip = true + truncated += indicator + } + + if skip && remaining > 0 { + // Skip char and decrement the remaining skip counter + remaining-- + continue + } + + // Add char to result string + truncated += string(char) + } + + // Return truncated string + return truncated +} + func errorF(err error, format string, a ...interface{}) { if err == nil { return From 1d1dd76cedc719b90cbd131d01c939a17b879648 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 21:38:08 +0100 Subject: [PATCH 017/195] Implement url --- drive/url.go | 26 ++++++++++++++++++++++++++ gdrive.go | 2 +- handlers.go | 9 +++++++++ 3 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 drive/url.go diff --git a/drive/url.go b/drive/url.go new file mode 100644 index 00000000..2dc429d5 --- /dev/null +++ b/drive/url.go @@ -0,0 +1,26 @@ +package drive + +import ( + "fmt" +) + +type UrlArgs struct { + FileId string + DownloadUrl bool +} + +func (self *Drive) Url(args UrlArgs) { + if args.DownloadUrl { + fmt.Println(downloadUrl(args.FileId)) + return + } + fmt.Println(previewUrl(args.FileId)) +} + +func previewUrl(id string) string { + return fmt.Sprintf("https://drive.google.com/uc?id=%s", id) +} + +func downloadUrl(id string) string { + return fmt.Sprintf("https://drive.google.com/uc?id=%s&export=download", id) +} diff --git a/gdrive.go b/gdrive.go index 3f9ec90b..c2f2fbae 100644 --- a/gdrive.go +++ b/gdrive.go @@ -224,7 +224,7 @@ func main() { &cli.Handler{ Pattern: "[global options] url [options] ", Description: "Get url to file or directory", - Callback: handler, + Callback: urlHandler, Flags: cli.Flags{ "global options": globalFlags, "options": []cli.Flag{ diff --git a/handlers.go b/handlers.go index 3d90dc7e..d74eb1a7 100644 --- a/handlers.go +++ b/handlers.go @@ -82,6 +82,15 @@ func shareHandler(ctx cli.Context) { }) } +func urlHandler(ctx cli.Context) { + args := ctx.Args() + + newDrive(args).Url(drive.UrlArgs{ + FileId: args.String("id"), + DownloadUrl: args.Bool("download"), + }) +} + func deleteHandler(ctx cli.Context) { fmt.Println("Deleting...") } From f4dd433b66f7d25bca32df758e541c0eb948669b Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 21:55:19 +0100 Subject: [PATCH 018/195] Implement delete --- drive/delete.go | 18 ++++++++++++++++++ handlers.go | 6 +++++- 2 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 drive/delete.go diff --git a/drive/delete.go b/drive/delete.go new file mode 100644 index 00000000..ebf72edc --- /dev/null +++ b/drive/delete.go @@ -0,0 +1,18 @@ +package drive + +import ( + "fmt" +) + +type DeleteArgs struct { + Id string +} + +func (self *Drive) Delete(args DeleteArgs) { + f, err := 
self.service.Files.Get(args.Id).Fields("name").Do() + errorF(err, "Failed to get file: %s", err) + + err = self.service.Files.Delete(args.Id).Do() + errorF(err, "Failed to delete file") + fmt.Printf("Removed file '%s'\n", f.Name) +} diff --git a/handlers.go b/handlers.go index d74eb1a7..93250268 100644 --- a/handlers.go +++ b/handlers.go @@ -92,7 +92,11 @@ func urlHandler(ctx cli.Context) { } func deleteHandler(ctx cli.Context) { - fmt.Println("Deleting...") + args := ctx.Args() + + newDrive(args).Delete(drive.DeleteArgs{ + Id: args.String("id"), + }) } func handler(ctx cli.Context) { From 742c5a2f7b16a5323eddfa51cbd387c504d7e02a Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 22:50:26 +0100 Subject: [PATCH 019/195] Implement about --- drive/about.go | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ gdrive.go | 18 +++++++++++++++--- handlers.go | 10 ++++++++++ 3 files changed, 75 insertions(+), 3 deletions(-) create mode 100644 drive/about.go diff --git a/drive/about.go b/drive/about.go new file mode 100644 index 00000000..0dbeca22 --- /dev/null +++ b/drive/about.go @@ -0,0 +1,50 @@ +package drive + +import ( + "fmt" + "os" + "text/tabwriter" +) + +type AboutArgs struct { + SizeInBytes bool + ImportFormats bool + ExportFormats bool +} + +func (self *Drive) About(args AboutArgs) { + about, err := self.service.About.Get().Fields("exportFormats", "importFormats", "maxImportSizes", "maxUploadSize", "storageQuota", "user").Do() + errorF(err, "Failed to get about %s", err) + + if args.ExportFormats { + printSupportedFormats(about.ExportFormats) + return + } + + if args.ImportFormats { + printSupportedFormats(about.ImportFormats) + return + } + + user := about.User + quota := about.StorageQuota + + fmt.Printf("User: %s, %s\n", user.DisplayName, user.EmailAddress) + fmt.Printf("Used: %s\n", formatSize(quota.UsageInDrive, args.SizeInBytes)) + fmt.Printf("Free: %s\n", formatSize(quota.Limit - quota.UsageInDrive, args.SizeInBytes)) + fmt.Printf("Total: %s\n", formatSize(quota.Limit, args.SizeInBytes)) + fmt.Printf("Max upload size: %s\n", formatSize(about.MaxUploadSize, args.SizeInBytes)) +} + +func printSupportedFormats(formats map[string][]string) { + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 0, 3, ' ', 0) + + fmt.Fprintln(w, "From\tTo") + + for from, toFormats := range formats { + fmt.Fprintf(w, "%s\t%s\n", from, formatList(toFormats)) + } + + w.Flush() +} diff --git a/gdrive.go b/gdrive.go index c2f2fbae..f884b8e2 100644 --- a/gdrive.go +++ b/gdrive.go @@ -246,9 +246,9 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] quota [options]", - Description: "Show free space", - Callback: handler, + Pattern: "[global options] about [options]", + Description: "Google drive metadata, quota usage, import/export formats", + Callback: aboutHandler, Flags: cli.Flags{ "global options": globalFlags, "options": []cli.Flag{ @@ -258,6 +258,18 @@ func main() { Description: "Show size in bytes", OmitValue: true, }, + cli.BoolFlag{ + Name: "exportFormats", + Patterns: []string{"--export"}, + Description: "Show supported export formats", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "importFormats", + Patterns: []string{"--import"}, + Description: "Show supported import formats", + OmitValue: true, + }, }, }, }, diff --git a/handlers.go b/handlers.go index 93250268..d4a97d84 100644 --- a/handlers.go +++ b/handlers.go @@ -99,6 +99,16 @@ func deleteHandler(ctx cli.Context) { }) } +func aboutHandler(ctx cli.Context) { + args := ctx.Args() + + 
newDrive(args).About(drive.AboutArgs{ + SizeInBytes: args.Bool("sizeInBytes"), + ImportFormats: args.Bool("importFormats"), + ExportFormats: args.Bool("exportFormats"), + }) +} + func handler(ctx cli.Context) { fmt.Println("handler...") } From c88aba0d9b14777be0915541503ca0e1a7936f67 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 22:52:38 +0100 Subject: [PATCH 020/195] Remove handler --- handlers.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/handlers.go b/handlers.go index d4a97d84..ef7688ed 100644 --- a/handlers.go +++ b/handlers.go @@ -109,10 +109,6 @@ func aboutHandler(ctx cli.Context) { }) } -func handler(ctx cli.Context) { - fmt.Println("handler...") -} - func printVersion(ctx cli.Context) { fmt.Printf("%s v%s\n", Name, Version) } From d4d1b00c9609a4d493f79bdd74bae5dc60d37ed7 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 17 Jan 2016 23:12:26 +0100 Subject: [PATCH 021/195] Return error --- drive/about.go | 7 +++++-- drive/delete.go | 12 +++++++++--- drive/download.go | 25 +++++++++++++++++-------- drive/info.go | 8 ++++++-- drive/list.go | 8 ++++++-- drive/mkdir.go | 8 ++++++-- drive/share.go | 13 +++++++++---- drive/upload.go | 15 +++++++++++---- drive/util.go | 16 ---------------- handlers.go | 33 ++++++++++++++++----------------- util.go | 7 +++++++ 11 files changed, 92 insertions(+), 60 deletions(-) diff --git a/drive/about.go b/drive/about.go index 0dbeca22..bea9fda0 100644 --- a/drive/about.go +++ b/drive/about.go @@ -12,9 +12,11 @@ type AboutArgs struct { ExportFormats bool } -func (self *Drive) About(args AboutArgs) { +func (self *Drive) About(args AboutArgs) (err error) { about, err := self.service.About.Get().Fields("exportFormats", "importFormats", "maxImportSizes", "maxUploadSize", "storageQuota", "user").Do() - errorF(err, "Failed to get about %s", err) + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } if args.ExportFormats { printSupportedFormats(about.ExportFormats) @@ -34,6 +36,7 @@ func (self *Drive) About(args AboutArgs) { fmt.Printf("Free: %s\n", formatSize(quota.Limit - quota.UsageInDrive, args.SizeInBytes)) fmt.Printf("Total: %s\n", formatSize(quota.Limit, args.SizeInBytes)) fmt.Printf("Max upload size: %s\n", formatSize(about.MaxUploadSize, args.SizeInBytes)) + return } func printSupportedFormats(formats map[string][]string) { diff --git a/drive/delete.go b/drive/delete.go index ebf72edc..6e868eb2 100644 --- a/drive/delete.go +++ b/drive/delete.go @@ -8,11 +8,17 @@ type DeleteArgs struct { Id string } -func (self *Drive) Delete(args DeleteArgs) { +func (self *Drive) Delete(args DeleteArgs) (err error) { f, err := self.service.Files.Get(args.Id).Fields("name").Do() - errorF(err, "Failed to get file: %s", err) + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } err = self.service.Files.Delete(args.Id).Do() - errorF(err, "Failed to delete file") + if err != nil { + return fmt.Errorf("Failed to delete file", err) + } + fmt.Printf("Removed file '%s'\n", f.Name) + return } diff --git a/drive/download.go b/drive/download.go index 9a35912c..57e0160c 100644 --- a/drive/download.go +++ b/drive/download.go @@ -13,43 +13,52 @@ type DownloadFileArgs struct { Stdout bool } -func (self *Drive) Download(args DownloadFileArgs) { +func (self *Drive) Download(args DownloadFileArgs) (err error) { getFile := self.service.Files.Get(args.Id) f, err := getFile.Do() - errorF(err, "Failed to get file: %s", err) + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } res, err := 
getFile.Download() - errorF(err, "Failed to download file: %s", err) + if err != nil { + return fmt.Errorf("Failed to download file: %s", err) + } // Close body on function exit defer res.Body.Close() if args.Stdout { // Write file content to stdout - io.Copy(os.Stdout, res.Body) - return + _, err := io.Copy(os.Stdout, res.Body) + return err } // Check if file exists if !args.Force && fileExists(f.Name) { - exitF("File '%s' already exists, use --force to overwrite", f.Name) + return fmt.Errorf("File '%s' already exists, use --force to overwrite", f.Name) } // Create new file outFile, err := os.Create(f.Name) - errorF(err, "Unable to create new file: %s", err) + if err != nil { + return fmt.Errorf("Unable to create new file: %s", err) + } // Close file on function exit defer outFile.Close() // Save file to disk bytes, err := io.Copy(outFile, res.Body) - errorF(err, "Failed saving file: %s", err) + if err != nil { + return fmt.Errorf("Failed saving file: %s", err) + } fmt.Printf("Downloaded '%s' at %s, total %d\n", f.Name, "x/s", bytes) //if deleteSourceFile { // self.Delete(args.Id) //} + return } diff --git a/drive/info.go b/drive/info.go index f5f5602f..ca09d0a8 100644 --- a/drive/info.go +++ b/drive/info.go @@ -10,14 +10,18 @@ type FileInfoArgs struct { SizeInBytes bool } -func (self *Drive) Info(args FileInfoArgs) { +func (self *Drive) Info(args FileInfoArgs) (err error) { f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description").Do() - errorF(err, "Failed to get file: %s", err) + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } PrintFileInfo(PrintFileInfoArgs{ File: f, SizeInBytes: args.SizeInBytes, }) + + return } type PrintFileInfoArgs struct { diff --git a/drive/list.go b/drive/list.go index c5427856..b2e3662b 100644 --- a/drive/list.go +++ b/drive/list.go @@ -15,9 +15,11 @@ type ListFilesArgs struct { SizeInBytes bool } -func (self *Drive) List(args ListFilesArgs) { +func (self *Drive) List(args ListFilesArgs) (err error) { fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).Fields("nextPageToken", "files(id,name,size,createdTime)").Do() - errorF(err, "Failed listing files: %s\n", err) + if err != nil { + return fmt.Errorf("Failed listing files: %s", err) + } PrintFileList(PrintFileListArgs{ Files: fileList.Files, @@ -25,6 +27,8 @@ func (self *Drive) List(args ListFilesArgs) { SkipHeader: args.SkipHeader, SizeInBytes: args.SizeInBytes, }) + + return } type PrintFileListArgs struct { diff --git a/drive/mkdir.go b/drive/mkdir.go index 82956492..40804741 100644 --- a/drive/mkdir.go +++ b/drive/mkdir.go @@ -2,6 +2,7 @@ package drive import ( "google.golang.org/api/drive/v3" + "fmt" ) const DirectoryMimeType = "application/vnd.google-apps.folder" @@ -12,7 +13,7 @@ type MkdirArgs struct { Share bool } -func (self *Drive) Mkdir(args MkdirArgs) { +func (self *Drive) Mkdir(args MkdirArgs) (err error) { dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType} // Set parent folder if provided @@ -22,11 +23,14 @@ func (self *Drive) Mkdir(args MkdirArgs) { // Create folder f, err := self.service.Files.Create(dstFile).Do() - errorF(err, "Failed to create folder: %s", err) + if err != nil { + return fmt.Errorf("Failed to create folder: %s", err) + } PrintFileInfo(PrintFileInfoArgs{File: f}) //if args.Share { // self.Share(TODO) //} + return } diff --git a/drive/share.go b/drive/share.go index 7e7036d9..37c1bf8c 100644 --- 
a/drive/share.go +++ b/drive/share.go @@ -14,10 +14,12 @@ type ShareArgs struct { Revoke bool } -func (self *Drive) Share(args ShareArgs) { +func (self *Drive) Share(args ShareArgs) (err error) { if args.Revoke { - err := self.deletePermissions(args) - errorF(err, "Failed delete permissions: %s", err) + err = self.deletePermissions(args) + if err != nil { + return fmt.Errorf("Failed delete permissions: %s", err) + } } permission := &drive.Permission{ @@ -28,9 +30,12 @@ func (self *Drive) Share(args ShareArgs) { } p, err := self.service.Permissions.Create(args.FileId, permission).Do() - errorF(err, "Failed share file: %s", err) + if err != nil { + return fmt.Errorf("Failed share file: %s", err) + } fmt.Println(p) + return } func (self *Drive) deletePermissions(args ShareArgs) error { diff --git a/drive/upload.go b/drive/upload.go index ed373ef8..16ae9400 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -19,16 +19,20 @@ type UploadFileArgs struct { Share bool } -func (self *Drive) Upload(args UploadFileArgs) { +func (self *Drive) Upload(args UploadFileArgs) (err error) { //if args.Stdin { // self.uploadStdin() //} srcFile, err := os.Open(args.Path) - errorF(err, "Failed to open file: %s", err) + if err != nil { + return fmt.Errorf("Failed to open file: %s", err) + } srcFileInfo, err := srcFile.Stat() - errorF(err, "Failed to read file metadata: %s", err) + if err != nil { + return fmt.Errorf("Failed to read file metadata: %s", err) + } // Instantiate empty drive file dstFile := &drive.File{} @@ -53,10 +57,13 @@ func (self *Drive) Upload(args UploadFileArgs) { } f, err := self.service.Files.Create(dstFile).ResumableMedia(context.Background(), srcFile, srcFileInfo.Size(), dstFile.MimeType).Do() - errorF(err, "Failed to upload file: %s", err) + if err != nil { + return fmt.Errorf("Failed to upload file: %s", err) + } fmt.Printf("Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) //if args.Share { // self.Share(TODO) //} + return } diff --git a/drive/util.go b/drive/util.go index bc2d5c3b..7ad0945c 100644 --- a/drive/util.go +++ b/drive/util.go @@ -103,22 +103,6 @@ func truncateString(str string, maxRunes int) string { return truncated } -func errorF(err error, format string, a ...interface{}) { - if err == nil { - return - } - - fmt.Fprintf(os.Stderr, format, a...) - fmt.Println("") - os.Exit(1) -} - -func exitF(format string, a ...interface{}) { - fmt.Fprintf(os.Stderr, format, a...) 
- fmt.Println("") - os.Exit(1) -} - func fileExists(path string) bool { _, err := os.Stat(path) if err == nil { diff --git a/handlers.go b/handlers.go index ef7688ed..5ff5cdb0 100644 --- a/handlers.go +++ b/handlers.go @@ -15,31 +15,30 @@ const TokenFilename = "token_v2.json" func listHandler(ctx cli.Context) { args := ctx.Args() - - newDrive(args).List(drive.ListFilesArgs{ + err := newDrive(args).List(drive.ListFilesArgs{ MaxFiles: args.Int64("maxFiles"), NameWidth: args.Int64("nameWidth"), Query: args.String("query"), SkipHeader: args.Bool("skipHeader"), SizeInBytes: args.Bool("sizeInBytes"), }) + checkErr(err) } func downloadHandler(ctx cli.Context) { args := ctx.Args() - - newDrive(args).Download(drive.DownloadFileArgs{ + err := newDrive(args).Download(drive.DownloadFileArgs{ Id: args.String("id"), Force: args.Bool("force"), Stdout: args.Bool("stdout"), NoProgress: args.Bool("noprogress"), }) + checkErr(err) } func uploadHandler(ctx cli.Context) { args := ctx.Args() - - newDrive(args).Upload(drive.UploadFileArgs{ + err := newDrive(args).Upload(drive.UploadFileArgs{ Path: args.String("path"), Name: args.String("name"), Parent: args.String("parent"), @@ -48,31 +47,31 @@ func uploadHandler(ctx cli.Context) { Stdin: args.Bool("stdin"), Share: args.Bool("share"), }) + checkErr(err) } func infoHandler(ctx cli.Context) { args := ctx.Args() - - newDrive(args).Info(drive.FileInfoArgs{ + err := newDrive(args).Info(drive.FileInfoArgs{ Id: args.String("id"), SizeInBytes: args.Bool("sizeInBytes"), }) + checkErr(err) } func mkdirHandler(ctx cli.Context) { args := ctx.Args() - - newDrive(args).Mkdir(drive.MkdirArgs{ + err := newDrive(args).Mkdir(drive.MkdirArgs{ Name: args.String("name"), Parent: args.String("parent"), Share: args.Bool("share"), }) + checkErr(err) } func shareHandler(ctx cli.Context) { args := ctx.Args() - - newDrive(args).Share(drive.ShareArgs{ + err := newDrive(args).Share(drive.ShareArgs{ FileId: args.String("id"), Role: args.String("role"), Type: args.String("type"), @@ -80,11 +79,11 @@ func shareHandler(ctx cli.Context) { Discoverable: args.Bool("discoverable"), Revoke: args.Bool("revoke"), }) + checkErr(err) } func urlHandler(ctx cli.Context) { args := ctx.Args() - newDrive(args).Url(drive.UrlArgs{ FileId: args.String("id"), DownloadUrl: args.Bool("download"), @@ -93,20 +92,20 @@ func urlHandler(ctx cli.Context) { func deleteHandler(ctx cli.Context) { args := ctx.Args() - - newDrive(args).Delete(drive.DeleteArgs{ + err := newDrive(args).Delete(drive.DeleteArgs{ Id: args.String("id"), }) + checkErr(err) } func aboutHandler(ctx cli.Context) { args := ctx.Args() - - newDrive(args).About(drive.AboutArgs{ + err := newDrive(args).About(drive.AboutArgs{ SizeInBytes: args.Bool("sizeInBytes"), ImportFormats: args.Bool("importFormats"), ExportFormats: args.Bool("exportFormats"), }) + checkErr(err) } func printVersion(ctx cli.Context) { diff --git a/util.go b/util.go index 7f43bd2a..44166961 100644 --- a/util.go +++ b/util.go @@ -27,3 +27,10 @@ func ExitF(format string, a ...interface{}) { fmt.Println("") os.Exit(1) } + +func checkErr(err error) { + if err != nil { + fmt.Println(err) + os.Exit(1) + } +} From 33716c2a43f92466401e08533308219fb520af9e Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 18 Jan 2016 21:35:08 +0100 Subject: [PATCH 022/195] Take output writer as argument --- drive/about.go | 21 +++++++++++---------- drive/delete.go | 4 +++- drive/download.go | 3 ++- drive/info.go | 6 +++++- drive/list.go | 7 +++++-- drive/mkdir.go | 4 +++- drive/share.go | 4 +++- 
drive/upload.go | 4 +++- drive/url.go | 6 ++++-- handlers.go | 10 ++++++++++ 10 files changed, 49 insertions(+), 20 deletions(-) diff --git a/drive/about.go b/drive/about.go index bea9fda0..dea927ca 100644 --- a/drive/about.go +++ b/drive/about.go @@ -1,12 +1,13 @@ package drive import ( + "io" "fmt" - "os" "text/tabwriter" ) type AboutArgs struct { + Out io.Writer SizeInBytes bool ImportFormats bool ExportFormats bool @@ -19,29 +20,29 @@ func (self *Drive) About(args AboutArgs) (err error) { } if args.ExportFormats { - printSupportedFormats(about.ExportFormats) + printSupportedFormats(args.Out, about.ExportFormats) return } if args.ImportFormats { - printSupportedFormats(about.ImportFormats) + printSupportedFormats(args.Out, about.ImportFormats) return } user := about.User quota := about.StorageQuota - fmt.Printf("User: %s, %s\n", user.DisplayName, user.EmailAddress) - fmt.Printf("Used: %s\n", formatSize(quota.UsageInDrive, args.SizeInBytes)) - fmt.Printf("Free: %s\n", formatSize(quota.Limit - quota.UsageInDrive, args.SizeInBytes)) - fmt.Printf("Total: %s\n", formatSize(quota.Limit, args.SizeInBytes)) - fmt.Printf("Max upload size: %s\n", formatSize(about.MaxUploadSize, args.SizeInBytes)) + fmt.Fprintf(args.Out, "User: %s, %s\n", user.DisplayName, user.EmailAddress) + fmt.Fprintf(args.Out, "Used: %s\n", formatSize(quota.UsageInDrive, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Free: %s\n", formatSize(quota.Limit - quota.UsageInDrive, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Total: %s\n", formatSize(quota.Limit, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Max upload size: %s\n", formatSize(about.MaxUploadSize, args.SizeInBytes)) return } -func printSupportedFormats(formats map[string][]string) { +func printSupportedFormats(out io.Writer, formats map[string][]string) { w := new(tabwriter.Writer) - w.Init(os.Stdout, 0, 0, 3, ' ', 0) + w.Init(out, 0, 0, 3, ' ', 0) fmt.Fprintln(w, "From\tTo") diff --git a/drive/delete.go b/drive/delete.go index 6e868eb2..cc0aeb32 100644 --- a/drive/delete.go +++ b/drive/delete.go @@ -1,10 +1,12 @@ package drive import ( + "io" "fmt" ) type DeleteArgs struct { + Out io.Writer Id string } @@ -19,6 +21,6 @@ func (self *Drive) Delete(args DeleteArgs) (err error) { return fmt.Errorf("Failed to delete file", err) } - fmt.Printf("Removed file '%s'\n", f.Name) + fmt.Fprintf(args.Out, "Removed file '%s'\n", f.Name) return } diff --git a/drive/download.go b/drive/download.go index 57e0160c..31bcc585 100644 --- a/drive/download.go +++ b/drive/download.go @@ -7,6 +7,7 @@ import ( ) type DownloadFileArgs struct { + Out io.Writer Id string Force bool NoProgress bool @@ -55,7 +56,7 @@ func (self *Drive) Download(args DownloadFileArgs) (err error) { return fmt.Errorf("Failed saving file: %s", err) } - fmt.Printf("Downloaded '%s' at %s, total %d\n", f.Name, "x/s", bytes) + fmt.Fprintf(args.Out, "Downloaded '%s' at %s, total %d\n", f.Name, "x/s", bytes) //if deleteSourceFile { // self.Delete(args.Id) diff --git a/drive/info.go b/drive/info.go index ca09d0a8..080c23c9 100644 --- a/drive/info.go +++ b/drive/info.go @@ -1,11 +1,13 @@ package drive import ( + "io" "fmt" "google.golang.org/api/drive/v3" ) type FileInfoArgs struct { + Out io.Writer Id string SizeInBytes bool } @@ -17,6 +19,7 @@ func (self *Drive) Info(args FileInfoArgs) (err error) { } PrintFileInfo(PrintFileInfoArgs{ + Out: args.Out, File: f, SizeInBytes: args.SizeInBytes, }) @@ -25,6 +28,7 @@ func (self *Drive) Info(args FileInfoArgs) (err error) { } type PrintFileInfoArgs struct { + Out io.Writer File 
*drive.File SizeInBytes bool } @@ -47,7 +51,7 @@ func PrintFileInfo(args PrintFileInfoArgs) { for _, item := range items { if item.value() != "" { - fmt.Printf("%s: %s\n", item.key(), item.value()) + fmt.Fprintf(args.Out, "%s: %s\n", item.key(), item.value()) } } } diff --git a/drive/list.go b/drive/list.go index b2e3662b..5649a124 100644 --- a/drive/list.go +++ b/drive/list.go @@ -2,12 +2,13 @@ package drive import ( "fmt" - "os" + "io" "text/tabwriter" "google.golang.org/api/drive/v3" ) type ListFilesArgs struct { + Out io.Writer MaxFiles int64 NameWidth int64 Query string @@ -22,6 +23,7 @@ func (self *Drive) List(args ListFilesArgs) (err error) { } PrintFileList(PrintFileListArgs{ + Out: args.Out, Files: fileList.Files, NameWidth: int(args.NameWidth), SkipHeader: args.SkipHeader, @@ -32,6 +34,7 @@ func (self *Drive) List(args ListFilesArgs) (err error) { } type PrintFileListArgs struct { + Out io.Writer Files []*drive.File NameWidth int SkipHeader bool @@ -40,7 +43,7 @@ type PrintFileListArgs struct { func PrintFileList(args PrintFileListArgs) { w := new(tabwriter.Writer) - w.Init(os.Stdout, 0, 0, 3, ' ', 0) + w.Init(args.Out, 0, 0, 3, ' ', 0) if !args.SkipHeader { fmt.Fprintln(w, "Id\tName\tSize\tCreated") diff --git a/drive/mkdir.go b/drive/mkdir.go index 40804741..99047ee2 100644 --- a/drive/mkdir.go +++ b/drive/mkdir.go @@ -2,12 +2,14 @@ package drive import ( "google.golang.org/api/drive/v3" + "io" "fmt" ) const DirectoryMimeType = "application/vnd.google-apps.folder" type MkdirArgs struct { + Out io.Writer Name string Parent string Share bool @@ -27,7 +29,7 @@ func (self *Drive) Mkdir(args MkdirArgs) (err error) { return fmt.Errorf("Failed to create folder: %s", err) } - PrintFileInfo(PrintFileInfoArgs{File: f}) + PrintFileInfo(PrintFileInfoArgs{Out: args.Out, File: f}) //if args.Share { // self.Share(TODO) diff --git a/drive/share.go b/drive/share.go index 37c1bf8c..43655df7 100644 --- a/drive/share.go +++ b/drive/share.go @@ -1,11 +1,13 @@ package drive import ( + "io" "fmt" "google.golang.org/api/drive/v3" ) type ShareArgs struct { + Out io.Writer FileId string Role string Type string @@ -34,7 +36,7 @@ func (self *Drive) Share(args ShareArgs) (err error) { return fmt.Errorf("Failed share file: %s", err) } - fmt.Println(p) + fmt.Fprintln(args.Out, p) return } diff --git a/drive/upload.go b/drive/upload.go index 16ae9400..3a65d0fe 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -4,12 +4,14 @@ import ( "fmt" "mime" "os" + "io" "path/filepath" "google.golang.org/api/drive/v3" "golang.org/x/net/context" ) type UploadFileArgs struct { + Out io.Writer Path string Name string Parent string @@ -61,7 +63,7 @@ func (self *Drive) Upload(args UploadFileArgs) (err error) { return fmt.Errorf("Failed to upload file: %s", err) } - fmt.Printf("Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) + fmt.Fprintf(args.Out, "Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) //if args.Share { // self.Share(TODO) //} diff --git a/drive/url.go b/drive/url.go index 2dc429d5..9dee4ad0 100644 --- a/drive/url.go +++ b/drive/url.go @@ -1,20 +1,22 @@ package drive import ( + "io" "fmt" ) type UrlArgs struct { + Out io.Writer FileId string DownloadUrl bool } func (self *Drive) Url(args UrlArgs) { if args.DownloadUrl { - fmt.Println(downloadUrl(args.FileId)) + fmt.Fprintln(args.Out, downloadUrl(args.FileId)) return } - fmt.Println(previewUrl(args.FileId)) + fmt.Fprintln(args.Out, previewUrl(args.FileId)) } func previewUrl(id string) string { diff --git a/handlers.go b/handlers.go index 
5ff5cdb0..a36444c9 100644 --- a/handlers.go +++ b/handlers.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "os" "strings" "./cli" "./client" @@ -16,6 +17,7 @@ const TokenFilename = "token_v2.json" func listHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).List(drive.ListFilesArgs{ + Out: os.Stdout, MaxFiles: args.Int64("maxFiles"), NameWidth: args.Int64("nameWidth"), Query: args.String("query"), @@ -28,6 +30,7 @@ func listHandler(ctx cli.Context) { func downloadHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Download(drive.DownloadFileArgs{ + Out: os.Stdout, Id: args.String("id"), Force: args.Bool("force"), Stdout: args.Bool("stdout"), @@ -39,6 +42,7 @@ func downloadHandler(ctx cli.Context) { func uploadHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Upload(drive.UploadFileArgs{ + Out: os.Stdout, Path: args.String("path"), Name: args.String("name"), Parent: args.String("parent"), @@ -53,6 +57,7 @@ func uploadHandler(ctx cli.Context) { func infoHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Info(drive.FileInfoArgs{ + Out: os.Stdout, Id: args.String("id"), SizeInBytes: args.Bool("sizeInBytes"), }) @@ -62,6 +67,7 @@ func infoHandler(ctx cli.Context) { func mkdirHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Mkdir(drive.MkdirArgs{ + Out: os.Stdout, Name: args.String("name"), Parent: args.String("parent"), Share: args.Bool("share"), @@ -72,6 +78,7 @@ func mkdirHandler(ctx cli.Context) { func shareHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Share(drive.ShareArgs{ + Out: os.Stdout, FileId: args.String("id"), Role: args.String("role"), Type: args.String("type"), @@ -85,6 +92,7 @@ func shareHandler(ctx cli.Context) { func urlHandler(ctx cli.Context) { args := ctx.Args() newDrive(args).Url(drive.UrlArgs{ + Out: os.Stdout, FileId: args.String("id"), DownloadUrl: args.Bool("download"), }) @@ -93,6 +101,7 @@ func urlHandler(ctx cli.Context) { func deleteHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Delete(drive.DeleteArgs{ + Out: os.Stdout, Id: args.String("id"), }) checkErr(err) @@ -101,6 +110,7 @@ func deleteHandler(ctx cli.Context) { func aboutHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).About(drive.AboutArgs{ + Out: os.Stdout, SizeInBytes: args.Bool("sizeInBytes"), ImportFormats: args.Bool("importFormats"), ExportFormats: args.Bool("exportFormats"), From e60833f88408139c8a92c3de9e8bfb87f295433e Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 18 Jan 2016 21:37:56 +0100 Subject: [PATCH 023/195] Simplify kv type --- drive/info.go | 4 ++-- drive/util.go | 11 +++-------- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/drive/info.go b/drive/info.go index 080c23c9..936b7da1 100644 --- a/drive/info.go +++ b/drive/info.go @@ -50,8 +50,8 @@ func PrintFileInfo(args PrintFileInfoArgs) { } for _, item := range items { - if item.value() != "" { - fmt.Fprintf(args.Out, "%s: %s\n", item.key(), item.value()) + if item.value != "" { + fmt.Fprintf(args.Out, "%s: %s\n", item.key, item.value) } } } diff --git a/drive/util.go b/drive/util.go index 7ad0945c..be01c80e 100644 --- a/drive/util.go +++ b/drive/util.go @@ -9,14 +9,9 @@ import ( "time" ) -type kv [2]string - -func (self kv) key() string { - return self[0] -} - -func (self kv) value() string { - return self[1] +type kv struct { + key string + value string } func formatList(a []string) string { From 4f4152ccf32acbd392c7d80e45834ca1f3ea2d62 Mon Sep 17 00:00:00 2001 From: 
Petter Rasmussen Date: Mon, 18 Jan 2016 21:54:26 +0100 Subject: [PATCH 024/195] Simplify drive wrapper, s/client/auth/ --- client/auth.go => auth/oauth.go | 2 +- {client => auth}/token.go | 2 +- {client => auth}/util.go | 2 +- client/client.go | 28 ---------------------------- drive/drive.go | 16 ++++++---------- handlers.go | 8 ++++---- 6 files changed, 13 insertions(+), 45 deletions(-) rename client/auth.go => auth/oauth.go (98%) rename {client => auth}/token.go (98%) rename {client => auth}/util.go (95%) delete mode 100644 client/client.go diff --git a/client/auth.go b/auth/oauth.go similarity index 98% rename from client/auth.go rename to auth/oauth.go index 9ea1ecea..b8f1d475 100644 --- a/client/auth.go +++ b/auth/oauth.go @@ -1,4 +1,4 @@ -package client +package auth import ( "net/http" diff --git a/client/token.go b/auth/token.go similarity index 98% rename from client/token.go rename to auth/token.go index 09312844..926d9f68 100644 --- a/client/token.go +++ b/auth/token.go @@ -1,4 +1,4 @@ -package client +package auth import ( "golang.org/x/oauth2" diff --git a/client/util.go b/auth/util.go similarity index 95% rename from client/util.go rename to auth/util.go index b600fd6f..b053c1fd 100644 --- a/client/util.go +++ b/auth/util.go @@ -1,4 +1,4 @@ -package client +package auth import ( "os" diff --git a/client/client.go b/client/client.go deleted file mode 100644 index 1b48bbb4..00000000 --- a/client/client.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "net/http" - "google.golang.org/api/drive/v3" -) - -type Client struct { - service *drive.Service - http *http.Client -} - -func (self *Client) Service() *drive.Service { - return self.service -} - -func (self *Client) Http() *http.Client { - return self.http -} - -func NewClient(client *http.Client) (*Client, error) { - service, err := drive.New(client) - if err != nil { - return nil, err - } - - return &Client{service, client}, nil -} diff --git a/drive/drive.go b/drive/drive.go index 047030a9..d908beb2 100644 --- a/drive/drive.go +++ b/drive/drive.go @@ -5,19 +5,15 @@ import ( "google.golang.org/api/drive/v3" ) -type Client interface { - Service() *drive.Service - Http() *http.Client -} - type Drive struct { service *drive.Service - http *http.Client } -func NewDrive(client Client) *Drive { - return &Drive{ - service: client.Service(), - http: client.Http(), +func New(client *http.Client) (*Drive, error) { + service, err := drive.New(client) + if err != nil { + return nil, err } + + return &Drive{service}, nil } diff --git a/handlers.go b/handlers.go index a36444c9..f9775f79 100644 --- a/handlers.go +++ b/handlers.go @@ -5,7 +5,7 @@ import ( "os" "strings" "./cli" - "./client" + "./auth" "./drive" ) @@ -155,17 +155,17 @@ func printCommandHelp(ctx cli.Context) { func newDrive(args cli.Arguments) *drive.Drive { configDir := args.String("configDir") tokenPath := ConfigFilePath(configDir, TokenFilename) - oauth, err := client.NewOauthClient(ClientId, ClientSecret, tokenPath, authCodePrompt) + oauth, err := auth.NewOauthClient(ClientId, ClientSecret, tokenPath, authCodePrompt) if err != nil { ExitF("Failed getting oauth client: %s", err.Error()) } - client, err := client.NewClient(oauth) + client, err := drive.New(oauth) if err != nil { ExitF("Failed getting drive: %s", err.Error()) } - return drive.NewDrive(client) + return client } func authCodePrompt(url string) func() string { From 2be43fe18545ba6c35ee344b9880d48a18afe878 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 18 Jan 2016 22:17:50 +0100 Subject: 
[PATCH 025/195] Store captured values as interface{} and type cast --- cli/context.go | 16 ++++------------ cli/flags.go | 5 ----- cli/parser.go | 41 +++++++++++++++++++++-------------------- 3 files changed, 25 insertions(+), 37 deletions(-) diff --git a/cli/context.go b/cli/context.go index b1037b09..ab40c871 100644 --- a/cli/context.go +++ b/cli/context.go @@ -1,8 +1,5 @@ package cli -import ( - "strconv" -) type Context struct { args Arguments @@ -21,21 +18,16 @@ func (self Context) FilterHandlers(prefix string) []*Handler { return filterHandlers(self.handlers, prefix) } -type Arguments map[string]string +type Arguments map[string]interface{} func (self Arguments) String(key string) string { - value, _ := self[key] - return value + return self[key].(string) } func (self Arguments) Int64(key string) int64 { - value, _ := self[key] - n, _ := strconv.ParseInt(value, 10, 64) - return n + return self[key].(int64) } func (self Arguments) Bool(key string) bool { - value, _ := self[key] - b, _ := strconv.ParseBool(value) - return b + return self[key].(bool) } diff --git a/cli/flags.go b/cli/flags.go index a5aa2761..6fcc435a 100644 --- a/cli/flags.go +++ b/cli/flags.go @@ -1,10 +1,5 @@ package cli -// TODO -// Default values? Default string values? Parser must always return a value -// Support invalid flag combinations? - - type Flag interface { GetPatterns() []string GetName() string diff --git a/cli/parser.go b/cli/parser.go index 433a4b1b..ff50eecc 100644 --- a/cli/parser.go +++ b/cli/parser.go @@ -7,7 +7,7 @@ import ( type Parser interface { Match([]string) ([]string, bool) - Capture([]string) ([]string, map[string]string) + Capture([]string) ([]string, map[string]interface{}) } type CompleteParser struct { @@ -28,12 +28,12 @@ func (self CompleteParser) Match(values []string) ([]string, bool) { return remainingValues, len(remainingValues) == 0 } -func (self CompleteParser) Capture(values []string) ([]string, map[string]string) { +func (self CompleteParser) Capture(values []string) ([]string, map[string]interface{}) { remainingValues := values - data := map[string]string{} + data := map[string]interface{}{} for _, parser := range self.parsers { - var captured map[string]string + var captured map[string]interface{} remainingValues, captured = parser.Capture(remainingValues) for key, value := range captured { data[key] = value @@ -64,7 +64,7 @@ func (self EqualParser) Match(values []string) ([]string, bool) { return values, false } -func (self EqualParser) Capture(values []string) ([]string, map[string]string) { +func (self EqualParser) Capture(values []string) ([]string, map[string]interface{}) { remainingValues, _ := self.Match(values) return remainingValues, nil } @@ -90,9 +90,9 @@ func (self CaptureGroupParser) key() string { return self.value[1:len(self.value) - 1] } -func (self CaptureGroupParser) Capture(values []string) ([]string, map[string]string) { +func (self CaptureGroupParser) Capture(values []string) ([]string, map[string]interface{}) { if remainingValues, ok := self.Match(values); ok { - return remainingValues, map[string]string{self.key(): values[0]} + return remainingValues, map[string]interface{}{self.key(): values[0]} } return values, nil @@ -140,12 +140,12 @@ func (self BoolFlagParser) Match(values []string) ([]string, bool) { } } -func (self BoolFlagParser) Capture(values []string) ([]string, map[string]string) { +func (self BoolFlagParser) Capture(values []string) ([]string, map[string]interface{}) { remainingValues, ok := self.Match(values) if !ok && !self.omitValue { - 
return remainingValues, map[string]string{self.key: fmt.Sprintf("%t", self.defaultValue)} + return remainingValues, map[string]interface{}{self.key: self.defaultValue} } - return remainingValues, map[string]string{self.key: fmt.Sprintf("%t", ok)} + return remainingValues, map[string]interface{}{self.key: ok} } func (self BoolFlagParser) String() string { @@ -170,13 +170,13 @@ func (self StringFlagParser) Match(values []string) ([]string, bool) { return values[2:], true } -func (self StringFlagParser) Capture(values []string) ([]string, map[string]string) { +func (self StringFlagParser) Capture(values []string) ([]string, map[string]interface{}) { remainingValues, ok := self.Match(values) if ok { - return remainingValues, map[string]string{self.key: values[1]} + return remainingValues, map[string]interface{}{self.key: values[1]} } - return values, map[string]string{self.key: self.defaultValue} + return values, map[string]interface{}{self.key: self.defaultValue} } func (self StringFlagParser) String() string { @@ -206,13 +206,14 @@ func (self IntFlagParser) Match(values []string) ([]string, bool) { return values[2:], true } -func (self IntFlagParser) Capture(values []string) ([]string, map[string]string) { +func (self IntFlagParser) Capture(values []string) ([]string, map[string]interface{}) { remainingValues, ok := self.Match(values) if ok { - return remainingValues, map[string]string{self.key: values[1]} + n, _ := strconv.ParseInt(values[1], 10, 64) + return remainingValues, map[string]interface{}{self.key: n} } - return values, map[string]string{self.key: fmt.Sprintf("%d", self.defaultValue)} + return values, map[string]interface{}{self.key: self.defaultValue} } func (self IntFlagParser) String() string { @@ -244,12 +245,12 @@ func (self FlagParser) Match(values []string) ([]string, bool) { return remainingValues, true } -func (self FlagParser) Capture(values []string) ([]string, map[string]string) { - data := map[string]string{} +func (self FlagParser) Capture(values []string) ([]string, map[string]interface{}) { + data := map[string]interface{}{} remainingValues := values for _, parser := range self.parsers { - var captured map[string]string + var captured map[string]interface{} remainingValues, captured = parser.Capture(remainingValues) for key, value := range captured { // Skip value if it already exists and new value is an empty string @@ -286,7 +287,7 @@ func (self ShortCircuitParser) Match(values []string) ([]string, bool) { return remainingValues, false } -func (self ShortCircuitParser) Capture(values []string) ([]string, map[string]string) { +func (self ShortCircuitParser) Capture(values []string) ([]string, map[string]interface{}) { if len(self.parsers) == 0 { return values, nil } From 44aa7407f6bdbc5157104969ef275cd5e51a99af Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 18 Jan 2016 23:03:44 +0100 Subject: [PATCH 026/195] Add StringSliceFlag and StringSliceParser --- cli/context.go | 4 ++++ cli/flags.go | 41 ++++++++++++++++++++++++++++++++++++++--- cli/parser.go | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 88 insertions(+), 3 deletions(-) diff --git a/cli/context.go b/cli/context.go index ab40c871..c1092819 100644 --- a/cli/context.go +++ b/cli/context.go @@ -31,3 +31,7 @@ func (self Arguments) Int64(key string) int64 { func (self Arguments) Bool(key string) bool { return self[key].(bool) } + +func (self Arguments) StringSlice(key string) []string { + return self[key].([]string) +} diff --git a/cli/flags.go b/cli/flags.go index 
6fcc435a..6c82ed76 100644 --- a/cli/flags.go +++ b/cli/flags.go @@ -27,7 +27,7 @@ type BoolFlag struct { } func (self BoolFlag) GetName() string { - return self.Name + return self.Name } func (self BoolFlag) GetPatterns() []string { @@ -64,7 +64,7 @@ type StringFlag struct { } func (self StringFlag) GetName() string { - return self.Name + return self.Name } func (self StringFlag) GetPatterns() []string { @@ -99,7 +99,7 @@ type IntFlag struct { } func (self IntFlag) GetName() string { - return self.Name + return self.Name } func (self IntFlag) GetPatterns() []string { @@ -125,3 +125,38 @@ func (self IntFlag) GetParser() Parser { } return ShortCircuitParser{parsers} } + +type StringSliceFlag struct { + Patterns []string + Name string + Description string + DefaultValue []string +} + +func (self StringSliceFlag) GetName() string { + return self.Name +} + +func (self StringSliceFlag) GetPatterns() []string { + return self.Patterns +} + +func (self StringSliceFlag) GetDescription() string { + return self.Description +} + +func (self StringSliceFlag) GetParser() Parser { + var parsers []Parser + for _, p := range self.Patterns { + parsers = append(parsers, StringSliceFlagParser{ + pattern: p, + key: self.Name, + defaultValue: self.DefaultValue, + }) + } + + if len(parsers) == 1 { + return parsers[0] + } + return ShortCircuitParser{parsers} +} diff --git a/cli/parser.go b/cli/parser.go index ff50eecc..d6706d5e 100644 --- a/cli/parser.go +++ b/cli/parser.go @@ -221,6 +221,52 @@ func (self IntFlagParser) String() string { } +type StringSliceFlagParser struct { + pattern string + key string + defaultValue []string +} + +func (self StringSliceFlagParser) Match(values []string) ([]string, bool) { + if len(values) < 2 { + return values, false + } + + var remainingValues []string + + for i := 0; i < len(values); i++ { + if values[i] == self.pattern && i + 1 < len(values) { + i++ + continue + } + remainingValues = append(remainingValues, values[i]) + } + + return remainingValues, len(values) != len(remainingValues) +} + +func (self StringSliceFlagParser) Capture(values []string) ([]string, map[string]interface{}) { + remainingValues, ok := self.Match(values) + if !ok { + return values, map[string]interface{}{self.key: self.defaultValue} + } + + var captured []string + + for i := 0; i < len(values); i++ { + if values[i] == self.pattern && i + 1 < len(values) { + captured = append(captured, values[i + 1]) + } + } + + return remainingValues, map[string]interface{}{self.key: captured} +} + +func (self StringSliceFlagParser) String() string { + return fmt.Sprintf("StringSliceFlagParser '%s'", self.pattern) +} + + type FlagParser struct { parsers []Parser } From b7f7493f1611fe51d8ba803122a3ca37e760c8ad Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 20 Jan 2016 21:40:17 +0100 Subject: [PATCH 027/195] Allow multiple parent flags --- drive/mkdir.go | 8 +++----- drive/upload.go | 8 +++----- gdrive.go | 8 ++++---- handlers.go | 4 ++-- 4 files changed, 12 insertions(+), 16 deletions(-) diff --git a/drive/mkdir.go b/drive/mkdir.go index 99047ee2..8b3acc24 100644 --- a/drive/mkdir.go +++ b/drive/mkdir.go @@ -11,17 +11,15 @@ const DirectoryMimeType = "application/vnd.google-apps.folder" type MkdirArgs struct { Out io.Writer Name string - Parent string + Parents []string Share bool } func (self *Drive) Mkdir(args MkdirArgs) (err error) { dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType} - // Set parent folder if provided - if args.Parent != "" { - dstFile.Parents = []string{args.Parent} - 
} + // Set parent folders + dstFile.Parents = args.Parents // Create folder f, err := self.service.Files.Create(dstFile).Do() diff --git a/drive/upload.go b/drive/upload.go index 3a65d0fe..fdba5ccc 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -14,7 +14,7 @@ type UploadFileArgs struct { Out io.Writer Path string Name string - Parent string + Parents []string Mime string Recursive bool Stdin bool @@ -53,10 +53,8 @@ func (self *Drive) Upload(args UploadFileArgs) (err error) { dstFile.MimeType = args.Mime } - // Set parent folder if provided - if args.Parent != "" { - dstFile.Parents = []string{args.Parent} - } + // Set parent folders + dstFile.Parents = args.Parents f, err := self.service.Files.Create(dstFile).ResumableMedia(context.Background(), srcFile, srcFileInfo.Size(), dstFile.MimeType).Do() if err != nil { diff --git a/gdrive.go b/gdrive.go index f884b8e2..97f5e91b 100644 --- a/gdrive.go +++ b/gdrive.go @@ -109,10 +109,10 @@ func main() { Description: "Upload directory recursively", OmitValue: true, }, - cli.StringFlag{ + cli.StringSliceFlag{ Name: "parent", Patterns: []string{"-p", "--parent"}, - Description: "Parent id, used to upload file to a specific directory", + Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", }, cli.StringFlag{ Name: "name", @@ -168,10 +168,10 @@ func main() { Flags: cli.Flags{ "global options": globalFlags, "options": []cli.Flag{ - cli.StringFlag{ + cli.StringSliceFlag{ Name: "parent", Patterns: []string{"-p", "--parent"}, - Description: "Parent id of created directory", + Description: "Parent id of created directory, can be specified multiple times to give many parents", }, cli.BoolFlag{ Name: "share", diff --git a/handlers.go b/handlers.go index f9775f79..5358c9bd 100644 --- a/handlers.go +++ b/handlers.go @@ -45,7 +45,7 @@ func uploadHandler(ctx cli.Context) { Out: os.Stdout, Path: args.String("path"), Name: args.String("name"), - Parent: args.String("parent"), + Parents: args.StringSlice("parent"), Mime: args.String("mime"), Recursive: args.Bool("recursive"), Stdin: args.Bool("stdin"), @@ -69,7 +69,7 @@ func mkdirHandler(ctx cli.Context) { err := newDrive(args).Mkdir(drive.MkdirArgs{ Out: os.Stdout, Name: args.String("name"), - Parent: args.String("parent"), + Parents: args.StringSlice("parent"), Share: args.Bool("share"), }) checkErr(err) From f90c11bfefcb7b82cb1032b0c43e3b35db4c2e73 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Thu, 21 Jan 2016 22:07:36 +0100 Subject: [PATCH 028/195] Improve parsers --- cli/parser.go | 225 +++++++++++++++++++++++++------------------------- 1 file changed, 111 insertions(+), 114 deletions(-) diff --git a/cli/parser.go b/cli/parser.go index d6706d5e..e80750ec 100644 --- a/cli/parser.go +++ b/cli/parser.go @@ -10,44 +10,6 @@ type Parser interface { Capture([]string) ([]string, map[string]interface{}) } -type CompleteParser struct { - parsers []Parser -} - -func (self CompleteParser) Match(values []string) ([]string, bool) { - remainingValues := values - - for _, parser := range self.parsers { - var ok bool - remainingValues, ok = parser.Match(remainingValues) - if !ok { - return remainingValues, false - } - } - - return remainingValues, len(remainingValues) == 0 -} - -func (self CompleteParser) Capture(values []string) ([]string, map[string]interface{}) { - remainingValues := values - data := map[string]interface{}{} - - for _, parser := range self.parsers { - var captured map[string]interface{} - remainingValues, captured = 
parser.Capture(remainingValues) - for key, value := range captured { - data[key] = value - } - } - - return remainingValues, data -} - -func (self CompleteParser) String() string { - return fmt.Sprintf("CompleteParser %v", self.parsers) -} - - type EqualParser struct { value string } @@ -113,39 +75,35 @@ type BoolFlagParser struct { func (self BoolFlagParser) Match(values []string) ([]string, bool) { if self.omitValue { - if len(values) == 0 { - return values, false - } - - if self.pattern == values[0] { - return values[1:], true - } - - return values, false - } else { - if len(values) < 2 { - return values, false - } - - if self.pattern != values[0] { - return values, false - } + return flagKeyMatch(self.pattern, values, 0) + } - // Check that value is a valid boolean - if _, err := strconv.ParseBool(values[1]); err != nil { - return values, false - } + remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) + if !ok { + return remaining, false + } - return values[2:], true + // Check that value is a valid boolean + if _, err := strconv.ParseBool(value); err != nil { + return remaining, false } + + return remaining, true } func (self BoolFlagParser) Capture(values []string) ([]string, map[string]interface{}) { - remainingValues, ok := self.Match(values) - if !ok && !self.omitValue { - return remainingValues, map[string]interface{}{self.key: self.defaultValue} + if self.omitValue { + remaining, ok := flagKeyMatch(self.pattern, values, 0) + return remaining, map[string]interface{}{self.key: ok} } - return remainingValues, map[string]interface{}{self.key: ok} + + remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) + if !ok { + return remaining, map[string]interface{}{self.key: self.defaultValue} + } + + b, _ := strconv.ParseBool(value) + return remaining, map[string]interface{}{self.key: b} } func (self BoolFlagParser) String() string { @@ -159,24 +117,17 @@ type StringFlagParser struct { } func (self StringFlagParser) Match(values []string) ([]string, bool) { - if len(values) < 2 { - return values, false - } - - if self.pattern != values[0] { - return values, false - } - - return values[2:], true + remaining, _, ok := flagKeyValueMatch(self.pattern, values, 0) + return remaining, ok } func (self StringFlagParser) Capture(values []string) ([]string, map[string]interface{}) { - remainingValues, ok := self.Match(values) - if ok { - return remainingValues, map[string]interface{}{self.key: values[1]} + remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) + if !ok { + return remaining, map[string]interface{}{self.key: self.defaultValue} } - return values, map[string]interface{}{self.key: self.defaultValue} + return remaining, map[string]interface{}{self.key: value} } func (self StringFlagParser) String() string { @@ -190,30 +141,27 @@ type IntFlagParser struct { } func (self IntFlagParser) Match(values []string) ([]string, bool) { - if len(values) < 2 { - return values, false - } - - if self.pattern != values[0] { - return values, false + remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) + if !ok { + return remaining, false } // Check that value is a valid integer - if _, err := strconv.ParseInt(values[1], 10, 64); err != nil { - return values, false + if _, err := strconv.ParseInt(value, 10, 64); err != nil { + return remaining, false } - return values[2:], true + return remaining, true } func (self IntFlagParser) Capture(values []string) ([]string, map[string]interface{}) { - remainingValues, ok := self.Match(values) - if ok { - n, _ := 
strconv.ParseInt(values[1], 10, 64) - return remainingValues, map[string]interface{}{self.key: n} + remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) + if !ok { + return remaining, map[string]interface{}{self.key: self.defaultValue} } - return values, map[string]interface{}{self.key: self.defaultValue} + n, _ := strconv.ParseInt(value, 10, 64) + return remaining, map[string]interface{}{self.key: n} } func (self IntFlagParser) String() string { @@ -273,41 +221,26 @@ type FlagParser struct { func (self FlagParser) Match(values []string) ([]string, bool) { remainingValues := values - var oneOrMoreMatches bool for _, parser := range self.parsers { - var ok bool - remainingValues, ok = parser.Match(remainingValues) - if ok { - oneOrMoreMatches = true - } + remainingValues, _ = parser.Match(remainingValues) } - - // Recurse while we have one or more matches - if oneOrMoreMatches { - return self.Match(remainingValues) - } - return remainingValues, true } func (self FlagParser) Capture(values []string) ([]string, map[string]interface{}) { - data := map[string]interface{}{} + captured := map[string]interface{}{} remainingValues := values for _, parser := range self.parsers { - var captured map[string]interface{} - remainingValues, captured = parser.Capture(remainingValues) - for key, value := range captured { - // Skip value if it already exists and new value is an empty string - if _, exists := data[key]; exists && value == "" { - continue - } - - data[key] = value + var data map[string]interface{} + remainingValues, data = parser.Capture(remainingValues) + for key, value := range data { + captured[key] = value } } - return remainingValues, data + + return remainingValues, captured } func (self FlagParser) String() string { @@ -352,3 +285,67 @@ func (self ShortCircuitParser) Capture(values []string) ([]string, map[string]in func (self ShortCircuitParser) String() string { return fmt.Sprintf("ShortCircuitParser %v", self.parsers) } + +type CompleteParser struct { + parsers []Parser +} + +func (self CompleteParser) Match(values []string) ([]string, bool) { + remainingValues := values + + for _, parser := range self.parsers { + var ok bool + remainingValues, ok = parser.Match(remainingValues) + if !ok { + return remainingValues, false + } + } + + return remainingValues, len(remainingValues) == 0 +} + +func (self CompleteParser) Capture(values []string) ([]string, map[string]interface{}) { + remainingValues := values + data := map[string]interface{}{} + + for _, parser := range self.parsers { + var captured map[string]interface{} + remainingValues, captured = parser.Capture(remainingValues) + for key, value := range captured { + data[key] = value + } + } + + return remainingValues, data +} + +func (self CompleteParser) String() string { + return fmt.Sprintf("CompleteParser %v", self.parsers) +} + +func flagKeyValueMatch(key string, values []string, index int) ([]string, string, bool) { + if index > len(values) - 2 { + return values, "", false + } + + if values[index] == key { + value := values[index + 1] + remaining := append(values[:index], values[index + 2:]...) + return remaining, value, true + } + + return flagKeyValueMatch(key, values, index + 1) +} + +func flagKeyMatch(key string, values []string, index int) ([]string, bool) { + if index > len(values) - 1 { + return values, false + } + + if values[index] == key { + remaining := append(values[:index], values[index + 1:]...) 
+ return remaining, true + } + + return flagKeyMatch(key, values, index + 1) +} From 1fe1ad062175bca2891182698338a7bd40ce8982 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Fri, 22 Jan 2016 23:00:45 +0100 Subject: [PATCH 029/195] Copy slice --- cli/parser.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cli/parser.go b/cli/parser.go index e80750ec..50bd0bd3 100644 --- a/cli/parser.go +++ b/cli/parser.go @@ -291,7 +291,7 @@ type CompleteParser struct { } func (self CompleteParser) Match(values []string) ([]string, bool) { - remainingValues := values + remainingValues := copySlice(values) for _, parser := range self.parsers { var ok bool @@ -305,7 +305,7 @@ func (self CompleteParser) Match(values []string) ([]string, bool) { } func (self CompleteParser) Capture(values []string) ([]string, map[string]interface{}) { - remainingValues := values + remainingValues := copySlice(values) data := map[string]interface{}{} for _, parser := range self.parsers { @@ -349,3 +349,9 @@ func flagKeyMatch(key string, values []string, index int) ([]string, bool) { return flagKeyMatch(key, values, index + 1) } + +func copySlice(a []string) []string { + b := make([]string, len(a)) + copy(b, a) + return b +} From 379c198883530b944c3b61d7b342bdad6d995163 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Fri, 22 Jan 2016 23:53:27 +0100 Subject: [PATCH 030/195] Implement export --- drive/export.go | 111 ++++++++++++++++++++++++++++++++++++++++++++++++ gdrive.go | 27 ++++++++++++ handlers.go | 12 ++++++ 3 files changed, 150 insertions(+) create mode 100644 drive/export.go diff --git a/drive/export.go b/drive/export.go new file mode 100644 index 00000000..2cbc265a --- /dev/null +++ b/drive/export.go @@ -0,0 +1,111 @@ +package drive + +import ( + "io" + "os" + "fmt" + "mime" +) + +var DefaultExportMime = map[string]string{ + "application/vnd.google-apps.form": "application/zip", + "application/vnd.google-apps.document": "application/pdf", + "application/vnd.google-apps.drawing": "image/svg+xml", + "application/vnd.google-apps.spreadsheet": "text/csv", + "application/vnd.google-apps.script": "application/vnd.google-apps.script+json", + "application/vnd.google-apps.presentation": "application/pdf", +} + +type ExportArgs struct { + Out io.Writer + Id string + PrintMimes bool + Mime string + Force bool +} + +func (self *Drive) Export(args ExportArgs) (err error) { + f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } + + if args.PrintMimes { + return self.printMimes(args.Out, f.MimeType) + } + + exportMime, err := getExportMime(args.Mime, f.MimeType) + if err != nil { + return err + } + + filename := getExportFilename(f.Name, exportMime) + + res, err := self.service.Files.Export(args.Id, exportMime).Download() + if err != nil { + return fmt.Errorf("Failed to download file: %s", err) + } + + // Close body on function exit + defer res.Body.Close() + + // Check if file exists + if !args.Force && fileExists(filename) { + return fmt.Errorf("File '%s' already exists, use --force to overwrite", filename) + } + + // Create new file + outFile, err := os.Create(filename) + if err != nil { + return fmt.Errorf("Unable to create new file '%s': %s", filename, err) + } + + // Close file on function exit + defer outFile.Close() + + // Save file to disk + bytes, err := io.Copy(outFile, res.Body) + if err != nil { + return fmt.Errorf("Failed saving file: %s", err) + } + + fmt.Fprintf(args.Out, "Exported 
'%s' at %s, total %d\n", filename, "x/s", bytes) + return +} + +func (self *Drive) printMimes(out io.Writer, mimeType string) error { + about, err := self.service.About.Get().Fields("exportFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + + mimes, ok := about.ExportFormats[mimeType] + if !ok { + return fmt.Errorf("File with type '%s' cannot be exported", mimeType) + } + + fmt.Fprintf(out, "Available mime types: %s\n", formatList(mimes)) + return nil +} + +func getExportMime(userMime, fileMime string) (string, error) { + if userMime != "" { + return userMime, nil + } + + defaultMime, ok := DefaultExportMime[fileMime] + if !ok { + return "", fmt.Errorf("File with type '%s' does not have a default export mime, and can probably not be exported", fileMime) + } + + return defaultMime, nil +} + +func getExportFilename(name, mimeType string) string { + extensions, err := mime.ExtensionsByType(mimeType) + if err != nil { + return name + } + + return name + extensions[0] +} diff --git a/gdrive.go b/gdrive.go index 97f5e91b..14a3fe9d 100644 --- a/gdrive.go +++ b/gdrive.go @@ -161,6 +161,33 @@ func main() { }, }, }, + &cli.Handler{ + Pattern: "[global options] export [options] ", + Description: "Export a google document", + Callback: exportHandler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "force", + Patterns: []string{"-f", "--force"}, + Description: "Overwrite existing file", + OmitValue: true, + }, + cli.StringFlag{ + Name: "mime", + Patterns: []string{"--mime"}, + Description: "Mime type of exported file", + }, + cli.BoolFlag{ + Name: "printMimes", + Patterns: []string{"--print-mimes"}, + Description: "Print available mime types for given file", + OmitValue: true, + }, + }, + }, + }, &cli.Handler{ Pattern: "[global options] mkdir [options] ", Description: "Create directory", diff --git a/handlers.go b/handlers.go index 5358c9bd..31a1887c 100644 --- a/handlers.go +++ b/handlers.go @@ -64,6 +64,18 @@ func infoHandler(ctx cli.Context) { checkErr(err) } +func exportHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).Export(drive.ExportArgs{ + Out: os.Stdout, + Id: args.String("id"), + Mime: args.String("mime"), + PrintMimes: args.Bool("printMimes"), + Force: args.Bool("force"), + }) + checkErr(err) +} + func mkdirHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Mkdir(drive.MkdirArgs{ From 4e0cf6011e0da8bbaa9b9b63ea347e4dc59b9ca3 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 10:57:13 +0100 Subject: [PATCH 031/195] Prevent mutation of input values --- cli/parser.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cli/parser.go b/cli/parser.go index 50bd0bd3..5fbbe3f5 100644 --- a/cli/parser.go +++ b/cli/parser.go @@ -330,7 +330,7 @@ func flagKeyValueMatch(key string, values []string, index int) ([]string, string if values[index] == key { value := values[index + 1] - remaining := append(values[:index], values[index + 2:]...) + remaining := append(copySlice(values[:index]), values[index + 2:]...) return remaining, value, true } @@ -343,7 +343,7 @@ func flagKeyMatch(key string, values []string, index int) ([]string, bool) { } if values[index] == key { - remaining := append(values[:index], values[index + 1:]...) + remaining := append(copySlice(values[:index]), values[index + 1:]...) 
return remaining, true } From ef9d5e6b127e055cac1fd733d49304dd28780ccd Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 12:03:30 +0100 Subject: [PATCH 032/195] Implement list revisions --- drive/revision_list.go | 62 ++++++++++++++++++++++++++++++++++++++++++ gdrive.go | 28 +++++++++++++++++++ handlers.go | 12 ++++++++ 3 files changed, 102 insertions(+) create mode 100644 drive/revision_list.go diff --git a/drive/revision_list.go b/drive/revision_list.go new file mode 100644 index 00000000..941fbca9 --- /dev/null +++ b/drive/revision_list.go @@ -0,0 +1,62 @@ +package drive + +import ( + "fmt" + "io" + "text/tabwriter" + "google.golang.org/api/drive/v3" +) + +type ListRevisionsArgs struct { + Out io.Writer + Id string + NameWidth int64 + SkipHeader bool + SizeInBytes bool +} + +func (self *Drive) ListRevisions(args ListRevisionsArgs) (err error) { + revList, err := self.service.Revisions.List(args.Id).Fields("revisions(id,keepForever,size,modifiedTime,originalFilename)").Do() + if err != nil { + return fmt.Errorf("Failed listing revisions: %s", err) + } + + PrintRevisionList(PrintRevisionListArgs{ + Out: args.Out, + Revisions: revList.Revisions, + NameWidth: int(args.NameWidth), + SkipHeader: args.SkipHeader, + SizeInBytes: args.SizeInBytes, + }) + + return +} + +type PrintRevisionListArgs struct { + Out io.Writer + Revisions []*drive.Revision + NameWidth int + SkipHeader bool + SizeInBytes bool +} + +func PrintRevisionList(args PrintRevisionListArgs) { + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tSize\tModified\tKeepForever") + } + + for _, rev := range args.Revisions { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + rev.Id, + truncateString(rev.OriginalFilename, args.NameWidth), + formatSize(rev.Size, args.SizeInBytes), + formatDatetime(rev.ModifiedTime), + formatBool(rev.KeepForever), + ) + } + + w.Flush() +} diff --git a/gdrive.go b/gdrive.go index 14a3fe9d..78b168a6 100644 --- a/gdrive.go +++ b/gdrive.go @@ -68,6 +68,34 @@ func main() { }, }, }, + &cli.Handler{ + Pattern: "[global options] list revisions [options] ", + Description: "List file revisions", + Callback: listRevisionsHandler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.IntFlag{ + Name: "nameWidth", + Patterns: []string{"--name-width"}, + Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), + DefaultValue: DefaultNameWidth, + }, + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--no-header"}, + Description: "Dont print the header", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Size in bytes", + OmitValue: true, + }, + }, + }, + }, &cli.Handler{ Pattern: "[global options] download [options] ", Description: "Download file or directory", diff --git a/handlers.go b/handlers.go index 31a1887c..5e681ab2 100644 --- a/handlers.go +++ b/handlers.go @@ -76,6 +76,18 @@ func exportHandler(ctx cli.Context) { checkErr(err) } +func listRevisionsHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).ListRevisions(drive.ListRevisionsArgs{ + Out: os.Stdout, + Id: args.String("id"), + NameWidth: args.Int64("nameWidth"), + SizeInBytes: args.Bool("sizeInBytes"), + SkipHeader: args.Bool("skipHeader"), + }) + checkErr(err) +} + func mkdirHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Mkdir(drive.MkdirArgs{ From 
6b3da5bcd3891cb6f084a755aea966d7c23f811a Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 15:16:53 +0100 Subject: [PATCH 033/195] Move things around, support subcommand help --- cli/context.go | 4 -- cli/handler.go | 52 ++++++---------------- gdrive.go | 9 +++- handlers.go => handlers_drive.go | 35 --------------- handlers_meta.go | 76 ++++++++++++++++++++++++++++++++ util.go | 22 +++++++++ 6 files changed, 118 insertions(+), 80 deletions(-) rename handlers.go => handlers_drive.go (83%) create mode 100644 handlers_meta.go diff --git a/cli/context.go b/cli/context.go index c1092819..ce82b175 100644 --- a/cli/context.go +++ b/cli/context.go @@ -14,10 +14,6 @@ func (self Context) Handlers() []*Handler { return self.handlers } -func (self Context) FilterHandlers(prefix string) []*Handler { - return filterHandlers(self.handlers, prefix) -} - type Arguments map[string]interface{} func (self Arguments) String(key string) string { diff --git a/cli/handler.go b/cli/handler.go index 5cd13f80..a4aafef2 100644 --- a/cli/handler.go +++ b/cli/handler.go @@ -20,7 +20,7 @@ type Handler struct { func (self *Handler) getParser() Parser { var parsers []Parser - for _, pattern := range splitPattern(self.Pattern) { + for _, pattern := range self.SplitPattern() { if isOptional(pattern) { name := optionalName(pattern) parser := getFlagParser(self.Flags[name]) @@ -35,6 +35,18 @@ func (self *Handler) getParser() Parser { return CompleteParser{parsers} } +// Split on spaces but ignore spaces inside <...> and [...] +func (self *Handler) SplitPattern() []string { + re := regexp.MustCompile(`(<[^>]+>|\[[^\]]+]|\S+)`) + matches := []string{} + + for _, value := range re.FindAllStringSubmatch(self.Pattern, -1) { + matches = append(matches, value[1]) + } + + return matches +} + func SetHandlers(h []*Handler) { handlers = h } @@ -74,32 +86,6 @@ func Handle(args []string) bool { return true } -func filterHandlers(handlers []*Handler, prefix string) []*Handler { - matches := []*Handler{} - - for _, h := range handlers { - pattern := strings.Join(stripOptionals(splitPattern(h.Pattern)), " ") - if strings.HasPrefix(pattern, prefix) { - matches = append(matches, h) - } - } - - return matches -} - - -// Split on spaces but ignore spaces inside <...> and [...] 
-func splitPattern(pattern string) []string { - re := regexp.MustCompile(`(<[^>]+>|\[[^\]]+]|\S+)`) - matches := []string{} - - for _, value := range re.FindAllStringSubmatch(pattern, -1) { - matches = append(matches, value[1]) - } - - return matches -} - func isCaptureGroup(arg string) bool { return strings.HasPrefix(arg, "<") && strings.HasSuffix(arg, ">") } @@ -111,15 +97,3 @@ func isOptional(arg string) bool { func optionalName(s string) string { return s[1:len(s) - 1] } - -// Strip optional groups from pattern -func stripOptionals(pattern []string) []string { - newArgs := []string{} - - for _, arg := range pattern { - if !isOptional(arg) { - newArgs = append(newArgs, arg) - } - } - return newArgs -} diff --git a/gdrive.go b/gdrive.go index 78b168a6..f20f9f3c 100644 --- a/gdrive.go +++ b/gdrive.go @@ -339,10 +339,15 @@ func main() { Callback: printHelp, }, &cli.Handler{ - Pattern: "help ", - Description: "Print subcommand help", + Pattern: "help ", + Description: "Print command help", Callback: printCommandHelp, }, + &cli.Handler{ + Pattern: "help ", + Description: "Print subcommand help", + Callback: printSubCommandHelp, + }, } cli.SetHandlers(handlers) diff --git a/handlers.go b/handlers_drive.go similarity index 83% rename from handlers.go rename to handlers_drive.go index 5e681ab2..21bf5f7a 100644 --- a/handlers.go +++ b/handlers_drive.go @@ -3,7 +3,6 @@ package main import ( "fmt" "os" - "strings" "./cli" "./auth" "./drive" @@ -142,40 +141,6 @@ func aboutHandler(ctx cli.Context) { checkErr(err) } -func printVersion(ctx cli.Context) { - fmt.Printf("%s v%s\n", Name, Version) -} - -func printHelp(ctx cli.Context) { - fmt.Printf("%s usage:\n\n", Name) - - for _, h := range ctx.Handlers() { - fmt.Printf("%s %s (%s)\n", Name, h.Pattern, h.Description) - } -} - -func printCommandHelp(ctx cli.Context) { - handlers := ctx.FilterHandlers(ctx.Args().String("subcommand")) - - if len(handlers) == 0 { - ExitF("Subcommand not found") - } - - if len(handlers) > 1 { - ExitF("More than one matching subcommand, be more specific") - } - - handler := handlers[0] - - fmt.Printf("%s %s (%s)\n", Name, handler.Pattern, handler.Description) - for name, flags := range handler.Flags { - fmt.Printf("\n%s:\n", name) - for _, flag := range flags { - fmt.Printf(" %s (%s)\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription()) - } - } -} - func newDrive(args cli.Arguments) *drive.Drive { configDir := args.String("configDir") tokenPath := ConfigFilePath(configDir, TokenFilename) diff --git a/handlers_meta.go b/handlers_meta.go new file mode 100644 index 00000000..f0f7ef74 --- /dev/null +++ b/handlers_meta.go @@ -0,0 +1,76 @@ +package main + +import ( + "fmt" + "strings" + "./cli" +) + +func printVersion(ctx cli.Context) { + fmt.Printf("%s v%s\n", Name, Version) +} + +func printHelp(ctx cli.Context) { + fmt.Printf("%s usage:\n\n", Name) + + for _, h := range ctx.Handlers() { + fmt.Printf("%s %s (%s)\n", Name, h.Pattern, h.Description) + } +} + +func printCommandHelp(ctx cli.Context) { + args := ctx.Args() + prefix := []string{args.String("command")} + printCommandPrefixHelp(prefix, ctx) +} + +func printSubCommandHelp(ctx cli.Context) { + args := ctx.Args() + prefix := []string{args.String("command"), args.String("subcommand")} + printCommandPrefixHelp(prefix, ctx) +} + +func printCommandPrefixHelp(prefix []string, ctx cli.Context) { + handler := getHandler(ctx.Handlers(), prefix) + + if handler == nil { + ExitF("Command not found") + } + + fmt.Printf("%s %s (%s)\n", Name, handler.Pattern, 
handler.Description) + for name, flags := range handler.Flags { + fmt.Printf("\n%s:\n", name) + for _, flag := range flags { + fmt.Printf(" %s (%s)\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription()) + } + } +} + +func getHandler(handlers []*cli.Handler, prefix []string) *cli.Handler { + for _, h := range handlers { + pattern := stripOptionals(h.SplitPattern()) + + if len(prefix) > len(pattern) { + continue + } + + if equal(prefix, pattern[:len(prefix)]) { + return h + } + } + + return nil +} + +// Strip optional groups (<...>) from pattern +func stripOptionals(pattern []string) []string { + newArgs := []string{} + + for _, arg := range pattern { + if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") { + continue + } + newArgs = append(newArgs, arg) + } + return newArgs +} diff --git a/util.go b/util.go index 44166961..d40d33e0 100644 --- a/util.go +++ b/util.go @@ -22,6 +22,28 @@ func Homedir() string { return os.Getenv("HOME") } +func equal(a, b []string) bool { + if a == nil && b == nil { + return true; + } + + if a == nil || b == nil { + return false; + } + + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + return false + } + } + + return true +} + func ExitF(format string, a ...interface{}) { fmt.Fprintf(os.Stderr, format, a...) fmt.Println("") From dffebe0c1729fa9cf3fb0ff3be270bec53fb6820 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 15:21:58 +0100 Subject: [PATCH 034/195] Use variadic function --- handlers_meta.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/handlers_meta.go b/handlers_meta.go index f0f7ef74..6394cb30 100644 --- a/handlers_meta.go +++ b/handlers_meta.go @@ -20,17 +20,15 @@ func printHelp(ctx cli.Context) { func printCommandHelp(ctx cli.Context) { args := ctx.Args() - prefix := []string{args.String("command")} - printCommandPrefixHelp(prefix, ctx) + printCommandPrefixHelp(ctx, args.String("command")) } func printSubCommandHelp(ctx cli.Context) { args := ctx.Args() - prefix := []string{args.String("command"), args.String("subcommand")} - printCommandPrefixHelp(prefix, ctx) + printCommandPrefixHelp(ctx, args.String("command"), args.String("subcommand")) } -func printCommandPrefixHelp(prefix []string, ctx cli.Context) { +func printCommandPrefixHelp(ctx cli.Context, prefix ...string) { handler := getHandler(ctx.Handlers(), prefix) if handler == nil { From 21260bef3f29b97e1b1c2f7867b567066ebac543 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 16:45:05 +0100 Subject: [PATCH 035/195] noProgress --- drive/upload.go | 1 + handlers_drive.go | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drive/upload.go b/drive/upload.go index fdba5ccc..d57e020a 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -19,6 +19,7 @@ type UploadFileArgs struct { Recursive bool Stdin bool Share bool + NoProgress bool } func (self *Drive) Upload(args UploadFileArgs) (err error) { diff --git a/handlers_drive.go b/handlers_drive.go index 21bf5f7a..f208bed4 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -33,7 +33,7 @@ func downloadHandler(ctx cli.Context) { Id: args.String("id"), Force: args.Bool("force"), Stdout: args.Bool("stdout"), - NoProgress: args.Bool("noprogress"), + NoProgress: args.Bool("noProgress"), }) checkErr(err) } @@ -49,6 +49,7 @@ func uploadHandler(ctx cli.Context) { Recursive: args.Bool("recursive"), Stdin: args.Bool("stdin"), Share: args.Bool("share"), + NoProgress: args.Bool("noProgress"), }) checkErr(err) } From 
e3aa4296e9fc875866d907df341637db4ac8c815 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 17:09:47 +0100 Subject: [PATCH 036/195] Implement download revision --- drive/revision_download.go | 70 ++++++++++++++++++++++++++++++++++++++ gdrive.go | 28 +++++++++++++++ handlers_drive.go | 13 +++++++ 3 files changed, 111 insertions(+) create mode 100644 drive/revision_download.go diff --git a/drive/revision_download.go b/drive/revision_download.go new file mode 100644 index 00000000..c26c3ce7 --- /dev/null +++ b/drive/revision_download.go @@ -0,0 +1,70 @@ +package drive + +import ( + "fmt" + "io" + "os" +) + +type DownloadRevisionArgs struct { + Out io.Writer + FileId string + RevisionId string + Force bool + NoProgress bool + Stdout bool +} + +func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { + getRev := self.service.Revisions.Get(args.FileId, args.RevisionId) + + rev, err := getRev.Fields("originalFilename").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } + + if rev.OriginalFilename == "" { + return fmt.Errorf("Download is not supported for this file type") + } + + res, err := getRev.Download() + if err != nil { + return fmt.Errorf("Failed to download file: %s", err) + } + + // Close body on function exit + defer res.Body.Close() + + if args.Stdout { + // Write file content to stdout + _, err := io.Copy(os.Stdout, res.Body) + return err + } + + // Check if file exists + if !args.Force && fileExists(rev.OriginalFilename) { + return fmt.Errorf("File '%s' already exists, use --force to overwrite", rev.OriginalFilename) + } + + // Create new file + outFile, err := os.Create(rev.OriginalFilename) + if err != nil { + return fmt.Errorf("Unable to create new file: %s", err) + } + + // Close file on function exit + defer outFile.Close() + + // Save file to disk + bytes, err := io.Copy(outFile, res.Body) + if err != nil { + return fmt.Errorf("Failed saving file: %s", err) + } + + fmt.Fprintf(args.Out, "Downloaded '%s' at %s, total %d\n", rev.OriginalFilename, "x/s", bytes) + + //if deleteSourceFile { + // self.Delete(args.Id) + //} + return +} diff --git a/gdrive.go b/gdrive.go index f20f9f3c..967f6ec5 100644 --- a/gdrive.go +++ b/gdrive.go @@ -124,6 +124,34 @@ func main() { }, }, }, + &cli.Handler{ + Pattern: "[global options] download revision [options] ", + Description: "Download revision", + Callback: downloadRevisionHandler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "force", + Patterns: []string{"-f", "--force"}, + Description: "Overwrite existing file", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "stdout", + Patterns: []string{"--stdout"}, + Description: "Write file content to stdout", + OmitValue: true, + }, + }, + }, + }, &cli.Handler{ Pattern: "[global options] upload [options] ", Description: "Upload file or directory", diff --git a/handlers_drive.go b/handlers_drive.go index f208bed4..1b019968 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -38,6 +38,19 @@ func downloadHandler(ctx cli.Context) { checkErr(err) } +func downloadRevisionHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).DownloadRevision(drive.DownloadRevisionArgs{ + Out: os.Stdout, + FileId: args.String("fileId"), + RevisionId: args.String("revisionId"), + Force: args.Bool("force"), + Stdout: args.Bool("stdout"), + NoProgress: 
args.Bool("noProgress"), + }) + checkErr(err) +} + func uploadHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Upload(drive.UploadFileArgs{ From a4217d488c382bee3e9f7faf33ec3d139ba54cd5 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 17:22:37 +0100 Subject: [PATCH 037/195] Implement update --- drive/update.go | 71 +++++++++++++++++++++++++++++++++++++++++++++++ gdrive.go | 43 ++++++++++++++++++++++++++++ handlers_drive.go | 16 +++++++++++ 3 files changed, 130 insertions(+) create mode 100644 drive/update.go diff --git a/drive/update.go b/drive/update.go new file mode 100644 index 00000000..4fa33eda --- /dev/null +++ b/drive/update.go @@ -0,0 +1,71 @@ +package drive + +import ( + "fmt" + "mime" + "os" + "io" + "path/filepath" + "google.golang.org/api/drive/v3" + "golang.org/x/net/context" +) + +type UpdateArgs struct { + Out io.Writer + Id string + Path string + Name string + Parents []string + Mime string + Recursive bool + Stdin bool + Share bool + NoProgress bool +} + +func (self *Drive) Update(args UpdateArgs) (err error) { + //if args.Stdin { + // self.uploadStdin() + //} + + srcFile, err := os.Open(args.Path) + if err != nil { + return fmt.Errorf("Failed to open file: %s", err) + } + + srcFileInfo, err := srcFile.Stat() + if err != nil { + return fmt.Errorf("Failed to read file metadata: %s", err) + } + + // Instantiate empty drive file + dstFile := &drive.File{} + + // Use provided file name or use filename + if args.Name == "" { + dstFile.Name = filepath.Base(srcFileInfo.Name()) + } else { + dstFile.Name = args.Name + } + + // Set provided mime type or get type based on file extension + if args.Mime == "" { + dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) + } else { + dstFile.MimeType = args.Mime + } + + // Set parent folders + dstFile.Parents = args.Parents + + f, err := self.service.Files.Update(args.Id, dstFile).ResumableMedia(context.Background(), srcFile, srcFileInfo.Size(), dstFile.MimeType).Do() + if err != nil { + return fmt.Errorf("Failed to upload file: %s", err) + } + + fmt.Fprintf(args.Out, "Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) + //if args.Share { + // self.Share(TODO) + //} + return +} diff --git a/gdrive.go b/gdrive.go index 967f6ec5..354f2727 100644 --- a/gdrive.go +++ b/gdrive.go @@ -201,6 +201,49 @@ func main() { }, }, }, + &cli.Handler{ + Pattern: "[global options] update [options] ", + Description: "Update file, this creates a new revision of the file", + Callback: updateHandler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.StringSliceFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", + }, + cli.StringFlag{ + Name: "name", + Patterns: []string{"--name"}, + Description: "Filename", + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "stdin", + Patterns: []string{"--stdin"}, + Description: "Use stdin as file content", + OmitValue: true, + }, + cli.StringFlag{ + Name: "mime", + Patterns: []string{"--mime"}, + Description: "Force mime type", + }, + cli.BoolFlag{ + Name: "share", + Patterns: []string{"--share"}, + Description: "Share file", + OmitValue: true, + }, + }, + }, + }, &cli.Handler{ Pattern: "[global options] info [options] ", Description: "Show file info", diff --git a/handlers_drive.go 
b/handlers_drive.go index 1b019968..74919960 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -67,6 +67,22 @@ func uploadHandler(ctx cli.Context) { checkErr(err) } +func updateHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).Update(drive.UpdateArgs{ + Out: os.Stdout, + Id: args.String("id"), + Path: args.String("path"), + Name: args.String("name"), + Parents: args.StringSlice("parent"), + Mime: args.String("mime"), + Stdin: args.Bool("stdin"), + Share: args.Bool("share"), + NoProgress: args.Bool("noProgress"), + }) + checkErr(err) +} + func infoHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Info(drive.FileInfoArgs{ From 5386c4913a6b069f187da24615f545e2838265d6 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 17:40:59 +0100 Subject: [PATCH 038/195] Implement delete revision --- drive/revision_delete.go | 31 +++++++++++++++++++++++++++++++ gdrive.go | 8 ++++++++ handlers_drive.go | 10 ++++++++++ 3 files changed, 49 insertions(+) create mode 100644 drive/revision_delete.go diff --git a/drive/revision_delete.go b/drive/revision_delete.go new file mode 100644 index 00000000..88c81c66 --- /dev/null +++ b/drive/revision_delete.go @@ -0,0 +1,31 @@ +package drive + +import ( + "io" + "fmt" +) + +type DeleteRevisionArgs struct { + Out io.Writer + FileId string + RevisionId string +} + +func (self *Drive) DeleteRevision(args DeleteRevisionArgs) (err error) { + rev, err := self.service.Revisions.Get(args.FileId, args.RevisionId).Fields("originalFilename").Do() + if err != nil { + return fmt.Errorf("Failed to get revision: %s", err) + } + + if rev.OriginalFilename == "" { + return fmt.Errorf("Deleting revisions for this file type is not supported") + } + + err = self.service.Revisions.Delete(args.FileId, args.RevisionId).Do() + if err != nil { + return fmt.Errorf("Failed to delete revision", err) + } + + fmt.Fprintf(args.Out, "Deleted revision '%s'\n", args.RevisionId) + return +} diff --git a/gdrive.go b/gdrive.go index 354f2727..7c7d39fd 100644 --- a/gdrive.go +++ b/gdrive.go @@ -371,6 +371,14 @@ func main() { "global options": globalFlags, }, }, + &cli.Handler{ + Pattern: "[global options] delete revision ", + Description: "Delete file revision", + Callback: deleteRevisionHandler, + Flags: cli.Flags{ + "global options": globalFlags, + }, + }, &cli.Handler{ Pattern: "[global options] about [options]", Description: "Google drive metadata, quota usage, import/export formats", diff --git a/handlers_drive.go b/handlers_drive.go index 74919960..988cc46c 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -160,6 +160,16 @@ func deleteHandler(ctx cli.Context) { checkErr(err) } +func deleteRevisionHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{ + Out: os.Stdout, + FileId: args.String("fileId"), + RevisionId: args.String("revisionId"), + }) + checkErr(err) +} + func aboutHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).About(drive.AboutArgs{ From 01ef731036bfff516ee26baa8e3c1ec25c60ecbb Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 22:08:01 +0100 Subject: [PATCH 039/195] Implement upload via stdin --- drive/upload.go | 47 ++++++++++++++++++++++++++++++++++++++++++----- drive/util.go | 4 ++++ gdrive.go | 33 ++++++++++++++++++++++++++++++--- handlers_drive.go | 15 ++++++++++++++- 4 files changed, 90 insertions(+), 9 deletions(-) diff --git a/drive/upload.go b/drive/upload.go index d57e020a..5c5e38e2 100644 --- 
a/drive/upload.go +++ b/drive/upload.go @@ -6,6 +6,7 @@ import ( "os" "io" "path/filepath" + "google.golang.org/api/googleapi" "google.golang.org/api/drive/v3" "golang.org/x/net/context" ) @@ -17,16 +18,11 @@ type UploadFileArgs struct { Parents []string Mime string Recursive bool - Stdin bool Share bool NoProgress bool } func (self *Drive) Upload(args UploadFileArgs) (err error) { - //if args.Stdin { - // self.uploadStdin() - //} - srcFile, err := os.Open(args.Path) if err != nil { return fmt.Errorf("Failed to open file: %s", err) @@ -68,3 +64,44 @@ func (self *Drive) Upload(args UploadFileArgs) (err error) { //} return } + +type UploadStreamArgs struct { + Out io.Writer + In io.Reader + Name string + Parents []string + Mime string + Share bool + ChunkSize int64 +} + +func (self *Drive) UploadStream(args UploadStreamArgs) (err error) { + if args.ChunkSize > intMax() - 1 { + return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) + } + + // Instantiate empty drive file + dstFile := &drive.File{Name: args.Name} + + // Set mime type if provided + if args.Mime != "" { + dstFile.MimeType = args.Mime + } + + // Set parent folders + dstFile.Parents = args.Parents + + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + + f, err := self.service.Files.Create(dstFile).Media(args.In, chunkSize).Do() + if err != nil { + return fmt.Errorf("Failed to upload file: %s", err) + } + + fmt.Fprintf(args.Out, "Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) + //if args.Share { + // self.Share(TODO) + //} + return +} diff --git a/drive/util.go b/drive/util.go index be01c80e..af80b20d 100644 --- a/drive/util.go +++ b/drive/util.go @@ -105,3 +105,7 @@ func fileExists(path string) bool { } return false } + +func intMax() int64 { + return 1 << (strconv.IntSize - 1) - 1 +} diff --git a/gdrive.go b/gdrive.go index 7c7d39fd..03f5e3b6 100644 --- a/gdrive.go +++ b/gdrive.go @@ -11,6 +11,7 @@ const Version = "2.0.0" const DefaultMaxFiles = 30 const DefaultNameWidth = 40 +const DefaultUploadChunkSize = 8 * 1024 * 1024 const DefaultQuery = "trashed = false and 'me' in owners" const DefaultShareRole = "reader" const DefaultShareType = "anyone" @@ -181,12 +182,38 @@ func main() { Description: "Hide progress", OmitValue: true, }, + cli.StringFlag{ + Name: "mime", + Patterns: []string{"--mime"}, + Description: "Force mime type", + }, cli.BoolFlag{ - Name: "stdin", - Patterns: []string{"--stdin"}, - Description: "Use stdin as file content", + Name: "share", + Patterns: []string{"--share"}, + Description: "Share file", OmitValue: true, }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global options] upload stdin [options] ", + Description: "Upload file from stdin", + Callback: uploadStdinHandler, + Flags: cli.Flags{ + "global options": globalFlags, + "options": []cli.Flag{ + cli.StringSliceFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", + }, + cli.IntFlag{ + Name: "chunksize", + Patterns: []string{"--chunksize"}, + Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), + DefaultValue: DefaultUploadChunkSize, + }, cli.StringFlag{ Name: "mime", Patterns: []string{"--mime"}, diff --git a/handlers_drive.go b/handlers_drive.go index 988cc46c..5c3b5499 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -60,13 +60,26 @@ func uploadHandler(ctx cli.Context) { Parents: 
args.StringSlice("parent"), Mime: args.String("mime"), Recursive: args.Bool("recursive"), - Stdin: args.Bool("stdin"), Share: args.Bool("share"), NoProgress: args.Bool("noProgress"), }) checkErr(err) } +func uploadStdinHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).UploadStream(drive.UploadStreamArgs{ + Out: os.Stdout, + In: os.Stdin, + Name: args.String("name"), + Parents: args.StringSlice("parent"), + Mime: args.String("mime"), + Share: args.Bool("share"), + ChunkSize: args.Int64("chunksize"), + }) + checkErr(err) +} + func updateHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Update(drive.UpdateArgs{ From e6dd66c5722e37a4d2ea7000ce4d1f4090af9cc7 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 22:27:50 +0100 Subject: [PATCH 040/195] Remove url handler, add urls to info --- drive/info.go | 4 +++- drive/url.go | 28 ---------------------------- gdrive.go | 16 ---------------- handlers_drive.go | 9 --------- 4 files changed, 3 insertions(+), 54 deletions(-) delete mode 100644 drive/url.go diff --git a/drive/info.go b/drive/info.go index 936b7da1..d8383c39 100644 --- a/drive/info.go +++ b/drive/info.go @@ -13,7 +13,7 @@ type FileInfoArgs struct { } func (self *Drive) Info(args FileInfoArgs) (err error) { - f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description").Do() + f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description", "webContentLink", "webViewLink").Do() if err != nil { return fmt.Errorf("Failed to get file: %s", err) } @@ -47,6 +47,8 @@ func PrintFileInfo(args PrintFileInfoArgs) { kv{"Md5sum", f.Md5Checksum}, kv{"Shared", formatBool(f.Shared)}, kv{"Parents", formatList(f.Parents)}, + kv{"ViewUrl", f.WebViewLink}, + kv{"DownloadUrl", f.WebContentLink}, } for _, item := range items { diff --git a/drive/url.go b/drive/url.go deleted file mode 100644 index 9dee4ad0..00000000 --- a/drive/url.go +++ /dev/null @@ -1,28 +0,0 @@ -package drive - -import ( - "io" - "fmt" -) - -type UrlArgs struct { - Out io.Writer - FileId string - DownloadUrl bool -} - -func (self *Drive) Url(args UrlArgs) { - if args.DownloadUrl { - fmt.Fprintln(args.Out, downloadUrl(args.FileId)) - return - } - fmt.Fprintln(args.Out, previewUrl(args.FileId)) -} - -func previewUrl(id string) string { - return fmt.Sprintf("https://drive.google.com/uc?id=%s", id) -} - -func downloadUrl(id string) string { - return fmt.Sprintf("https://drive.google.com/uc?id=%s&export=download", id) -} diff --git a/gdrive.go b/gdrive.go index 03f5e3b6..4349b024 100644 --- a/gdrive.go +++ b/gdrive.go @@ -374,22 +374,6 @@ func main() { }, }, }, - &cli.Handler{ - Pattern: "[global options] url [options] ", - Description: "Get url to file or directory", - Callback: urlHandler, - Flags: cli.Flags{ - "global options": globalFlags, - "options": []cli.Flag{ - cli.BoolFlag{ - Name: "download", - Patterns: []string{"--download"}, - Description: "Download url", - OmitValue: true, - }, - }, - }, - }, &cli.Handler{ Pattern: "[global options] delete ", Description: "Delete file or directory", diff --git a/handlers_drive.go b/handlers_drive.go index 5c3b5499..eeb997fc 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -155,15 +155,6 @@ func shareHandler(ctx cli.Context) { checkErr(err) } -func urlHandler(ctx cli.Context) { - args := ctx.Args() - 
newDrive(args).Url(drive.UrlArgs{ - Out: os.Stdout, - FileId: args.String("id"), - DownloadUrl: args.Bool("download"), - }) -} - func deleteHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Delete(drive.DeleteArgs{ From 2304f9ae29a490231b8ba38d346de0754c4a7fd4 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 22:38:45 +0100 Subject: [PATCH 041/195] Give about import/export own handlers --- drive/about.go | 42 ++++++++++++++++++++++++++++-------------- gdrive.go | 30 +++++++++++++++++------------- handlers_drive.go | 18 ++++++++++++++++-- 3 files changed, 61 insertions(+), 29 deletions(-) diff --git a/drive/about.go b/drive/about.go index dea927ca..01d70734 100644 --- a/drive/about.go +++ b/drive/about.go @@ -9,26 +9,14 @@ import ( type AboutArgs struct { Out io.Writer SizeInBytes bool - ImportFormats bool - ExportFormats bool } func (self *Drive) About(args AboutArgs) (err error) { - about, err := self.service.About.Get().Fields("exportFormats", "importFormats", "maxImportSizes", "maxUploadSize", "storageQuota", "user").Do() + about, err := self.service.About.Get().Fields("maxImportSizes", "maxUploadSize", "storageQuota", "user").Do() if err != nil { return fmt.Errorf("Failed to get about: %s", err) } - if args.ExportFormats { - printSupportedFormats(args.Out, about.ExportFormats) - return - } - - if args.ImportFormats { - printSupportedFormats(args.Out, about.ImportFormats) - return - } - user := about.User quota := about.StorageQuota @@ -40,7 +28,33 @@ func (self *Drive) About(args AboutArgs) (err error) { return } -func printSupportedFormats(out io.Writer, formats map[string][]string) { +type AboutImportArgs struct { + Out io.Writer +} + +func (self *Drive) AboutImport(args AboutImportArgs) (err error) { + about, err := self.service.About.Get().Fields("importFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + printAboutFormats(args.Out, about.ImportFormats) + return +} + +type AboutExportArgs struct { + Out io.Writer +} + +func (self *Drive) AboutExport(args AboutExportArgs) (err error) { + about, err := self.service.About.Get().Fields("exportFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + printAboutFormats(args.Out, about.ExportFormats) + return +} + +func printAboutFormats(out io.Writer, formats map[string][]string) { w := new(tabwriter.Writer) w.Init(out, 0, 0, 3, ' ', 0) diff --git a/gdrive.go b/gdrive.go index 4349b024..2980fa46 100644 --- a/gdrive.go +++ b/gdrive.go @@ -392,7 +392,7 @@ func main() { }, &cli.Handler{ Pattern: "[global options] about [options]", - Description: "Google drive metadata, quota usage, import/export formats", + Description: "Google drive metadata, quota usage", Callback: aboutHandler, Flags: cli.Flags{ "global options": globalFlags, @@ -403,21 +403,25 @@ func main() { Description: "Show size in bytes", OmitValue: true, }, - cli.BoolFlag{ - Name: "exportFormats", - Patterns: []string{"--export"}, - Description: "Show supported export formats", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "importFormats", - Patterns: []string{"--import"}, - Description: "Show supported import formats", - OmitValue: true, - }, }, }, }, + &cli.Handler{ + Pattern: "[global options] about import", + Description: "Show supported import formats", + Callback: aboutImportHandler, + Flags: cli.Flags{ + "global options": globalFlags, + }, + }, + &cli.Handler{ + Pattern: "[global options] about export", + Description: "Show supported export formats", + Callback: 
aboutExportHandler, + Flags: cli.Flags{ + "global options": globalFlags, + }, + }, &cli.Handler{ Pattern: "version", Description: "Print application version", diff --git a/handlers_drive.go b/handlers_drive.go index eeb997fc..d454b4fb 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -179,8 +179,22 @@ func aboutHandler(ctx cli.Context) { err := newDrive(args).About(drive.AboutArgs{ Out: os.Stdout, SizeInBytes: args.Bool("sizeInBytes"), - ImportFormats: args.Bool("importFormats"), - ExportFormats: args.Bool("exportFormats"), + }) + checkErr(err) +} + +func aboutImportHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).AboutImport(drive.AboutImportArgs{ + Out: os.Stdout, + }) + checkErr(err) +} + +func aboutExportHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).AboutExport(drive.AboutExportArgs{ + Out: os.Stdout, }) checkErr(err) } From fdb18f8a1aa5ef13e22437fc56fc303bf65afc4e Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 22:46:22 +0100 Subject: [PATCH 042/195] s/global options/global/ --- gdrive.go | 64 +++++++++++++++++++++++++++---------------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/gdrive.go b/gdrive.go index 2980fa46..9dce10f4 100644 --- a/gdrive.go +++ b/gdrive.go @@ -30,11 +30,11 @@ func main() { handlers := []*cli.Handler{ &cli.Handler{ - Pattern: "[global options] list [options]", + Pattern: "[global] list [options]", Description: "List files", Callback: listHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.IntFlag{ Name: "maxFiles", @@ -70,11 +70,11 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] list revisions [options] ", + Pattern: "[global] list revisions [options] ", Description: "List file revisions", Callback: listRevisionsHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.IntFlag{ Name: "nameWidth", @@ -98,11 +98,11 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] download [options] ", + Pattern: "[global] download [options] ", Description: "Download file or directory", Callback: downloadHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.BoolFlag{ Name: "force", @@ -126,11 +126,11 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] download revision [options] ", + Pattern: "[global] download revision [options] ", Description: "Download revision", Callback: downloadRevisionHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.BoolFlag{ Name: "force", @@ -154,11 +154,11 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] upload [options] ", + Pattern: "[global] upload [options] ", Description: "Upload file or directory", Callback: uploadHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.BoolFlag{ Name: "recursive", @@ -197,11 +197,11 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] upload stdin [options] ", + Pattern: "[global] upload stdin [options] ", Description: "Upload file from stdin", Callback: uploadStdinHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.StringSliceFlag{ Name: "parent", @@ -229,11 +229,11 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] update [options] ", + Pattern: "[global] update 
[options] ", Description: "Update file, this creates a new revision of the file", Callback: updateHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.StringSliceFlag{ Name: "parent", @@ -272,11 +272,11 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] info [options] ", + Pattern: "[global] info [options] ", Description: "Show file info", Callback: infoHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.BoolFlag{ Name: "sizeInBytes", @@ -288,11 +288,11 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] export [options] ", + Pattern: "[global] export [options] ", Description: "Export a google document", Callback: exportHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.BoolFlag{ Name: "force", @@ -315,11 +315,11 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] mkdir [options] ", + Pattern: "[global] mkdir [options] ", Description: "Create directory", Callback: mkdirHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.StringSliceFlag{ Name: "parent", @@ -336,11 +336,11 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] share [options] ", + Pattern: "[global] share [options] ", Description: "Share file or directory", Callback: shareHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.BoolFlag{ Name: "discoverable", @@ -375,27 +375,27 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] delete ", + Pattern: "[global] delete ", Description: "Delete file or directory", Callback: deleteHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, }, }, &cli.Handler{ - Pattern: "[global options] delete revision ", + Pattern: "[global] delete revision ", Description: "Delete file revision", Callback: deleteRevisionHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, }, }, &cli.Handler{ - Pattern: "[global options] about [options]", + Pattern: "[global] about [options]", Description: "Google drive metadata, quota usage", Callback: aboutHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, "options": []cli.Flag{ cli.BoolFlag{ Name: "sizeInBytes", @@ -407,19 +407,19 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global options] about import", + Pattern: "[global] about import", Description: "Show supported import formats", Callback: aboutImportHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, }, }, &cli.Handler{ - Pattern: "[global options] about export", + Pattern: "[global] about export", Description: "Show supported export formats", Callback: aboutExportHandler, Flags: cli.Flags{ - "global options": globalFlags, + "global": globalFlags, }, }, &cli.Handler{ From 280ce2d2a71d27b4d389d130865e909e61dead25 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 23 Jan 2016 23:41:19 +0100 Subject: [PATCH 043/195] ResumableMedia is deprecated, use Media with ChunkSize --- drive/upload.go | 11 +++++++++-- gdrive.go | 6 ++++++ handlers_drive.go | 1 + 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/drive/upload.go b/drive/upload.go index 5c5e38e2..03cb5ad5 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -8,7 +8,6 @@ import ( "path/filepath" "google.golang.org/api/googleapi" 
"google.golang.org/api/drive/v3" - "golang.org/x/net/context" ) type UploadFileArgs struct { @@ -20,9 +19,14 @@ type UploadFileArgs struct { Recursive bool Share bool NoProgress bool + ChunkSize int64 } func (self *Drive) Upload(args UploadFileArgs) (err error) { + if args.ChunkSize > intMax() - 1 { + return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) + } + srcFile, err := os.Open(args.Path) if err != nil { return fmt.Errorf("Failed to open file: %s", err) @@ -53,7 +57,10 @@ func (self *Drive) Upload(args UploadFileArgs) (err error) { // Set parent folders dstFile.Parents = args.Parents - f, err := self.service.Files.Create(dstFile).ResumableMedia(context.Background(), srcFile, srcFileInfo.Size(), dstFile.MimeType).Do() + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + + f, err := self.service.Files.Create(dstFile).Media(srcFile, chunkSize).Do() if err != nil { return fmt.Errorf("Failed to upload file: %s", err) } diff --git a/gdrive.go b/gdrive.go index 9dce10f4..8754d90a 100644 --- a/gdrive.go +++ b/gdrive.go @@ -193,6 +193,12 @@ func main() { Description: "Share file", OmitValue: true, }, + cli.IntFlag{ + Name: "chunksize", + Patterns: []string{"--chunksize"}, + Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), + DefaultValue: DefaultUploadChunkSize, + }, }, }, }, diff --git a/handlers_drive.go b/handlers_drive.go index d454b4fb..8245481c 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -62,6 +62,7 @@ func uploadHandler(ctx cli.Context) { Recursive: args.Bool("recursive"), Share: args.Bool("share"), NoProgress: args.Bool("noProgress"), + ChunkSize: args.Int64("chunksize"), }) checkErr(err) } From 3d8d85ffb777dbf56276037b1d6f2222663a1de6 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 01:56:01 +0100 Subject: [PATCH 044/195] Initial progress indicator --- drive/util.go | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/drive/util.go b/drive/util.go index af80b20d..366f32bf 100644 --- a/drive/util.go +++ b/drive/util.go @@ -1,11 +1,13 @@ package drive import ( + "io" "os" "fmt" "strings" "strconv" "unicode/utf8" + "math" "time" ) @@ -39,6 +41,21 @@ func formatSize(bytes int64, forceBytes bool) string { return fmt.Sprintf("%.1f %s", value, units[i]) } +func calcRate(bytes int64, start, end time.Time) int64 { + seconds := float64(end.Sub(start).Seconds()) + if seconds < 1.0 { + return bytes + } + return round(float64(bytes) / seconds) +} + +func round(n float64) int64 { + if n < 0 { + return int64(math.Ceil(n - 0.5)) + } + return int64(math.Floor(n + 0.5)) +} + func formatBool(b bool) string { return strings.Title(strconv.FormatBool(b)) } @@ -109,3 +126,72 @@ func fileExists(path string) bool { func intMax() int64 { return 1 << (strconv.IntSize - 1) - 1 } + +type Progress struct { + Writer io.Writer + Reader io.Reader + Size int64 + progress int64 + rate int64 + rateProgress int64 + rateUpdated time.Time + updated time.Time + done bool +} + +func (self *Progress) Read(p []byte) (int, error) { + // Read + n, err := self.Reader.Read(p) + + now := time.Now() + isLast := err != nil + + // Increment progress + newProgress := self.progress + int64(n) + self.progress = newProgress + + if self.rateUpdated.IsZero() { + self.rateUpdated = now + } + + // Update rate every 3 seconds + if self.rateUpdated.Add(time.Second * 3).Before(now) { + self.rate = calcRate(newProgress - self.rateProgress, 
self.rateUpdated, now) + self.rateUpdated = now + self.rateProgress = newProgress + } + + // Draw progress every second + if self.updated.Add(time.Second).Before(now) || isLast { + self.Draw(isLast) + } + + // Update last draw time + self.updated = now + + // Mark as done if error occurs + self.done = isLast + + return n, err +} + +func (self *Progress) Draw(isLast bool) { + if self.done { + return + } + + // Clear line + fmt.Fprintf(self.Writer, "\r%50s", "") + + // Print progress + fmt.Fprintf(self.Writer, "\r%s/%s", formatSize(self.progress, false), formatSize(self.Size, false)) + + // Print rate + if self.rate > 0 { + fmt.Fprintf(self.Writer, ", Rate: %s/s", formatSize(self.rate, false)) + } + + if isLast { + fmt.Fprintf(self.Writer, "\n") + } +} From a008740722a824e707eea181ce4ae30bfa7d031a Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 02:00:20 +0100 Subject: [PATCH 045/195] Initialize rateProgress --- drive/util.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drive/util.go b/drive/util.go index 366f32bf..4103b9d4 100644 --- a/drive/util.go +++ b/drive/util.go @@ -150,8 +150,10 @@ func (self *Progress) Read(p []byte) (int, error) { newProgress := self.progress + int64(n) self.progress = newProgress + // Initialize rate state if self.rateUpdated.IsZero() { self.rateUpdated = now + self.rateProgress = newProgress } // Update rate every 3 seconds From b658f83ced4f558a6e23e554965d73bd6d4a04d1 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 11:21:38 +0100 Subject: [PATCH 046/195] Move progress type to own file --- drive/progress.go | 78 +++++++++++++++++++++++++++++++++++++++++++++++ drive/util.go | 72 ------------------------------------------- 2 files changed, 78 insertions(+), 72 deletions(-) create mode 100644 drive/progress.go diff --git a/drive/progress.go b/drive/progress.go new file mode 100644 index 00000000..ee63b6c0 --- /dev/null +++ b/drive/progress.go @@ -0,0 +1,78 @@ +package drive + +import ( + "io" + "fmt" + "time" +) + +type Progress struct { + Writer io.Writer + Reader io.Reader + Size int64 + progress int64 + rate int64 + rateProgress int64 + rateUpdated time.Time + updated time.Time + done bool +} + +func (self *Progress) Read(p []byte) (int, error) { + // Read + n, err := self.Reader.Read(p) + + now := time.Now() + isLast := err != nil + + // Increment progress + newProgress := self.progress + int64(n) + self.progress = newProgress + + // Initialize rate state + if self.rateUpdated.IsZero() { + self.rateUpdated = now + self.rateProgress = newProgress + } + + // Update rate every 3 seconds + if self.rateUpdated.Add(time.Second * 3).Before(now) { + self.rate = calcRate(newProgress - self.rateProgress, self.rateUpdated, now) + self.rateUpdated = now + self.rateProgress = newProgress + } + + // Draw progress every second + if self.updated.Add(time.Second).Before(now) || isLast { + self.Draw(isLast) + } + + // Update last draw time + self.updated = now + + // Mark as done if error occurs + self.done = isLast + + return n, err +} + +func (self *Progress) Draw(isLast bool) { + if self.done { + return + } + + // Clear line + fmt.Fprintf(self.Writer, "\r%50s", "") + + // Print progress + fmt.Fprintf(self.Writer, "\r%s/%s", formatSize(self.progress, false), formatSize(self.Size, false)) + + // Print rate + if self.rate > 0 { + fmt.Fprintf(self.Writer, ", Rate: %s/s", formatSize(self.rate, false)) + } + + if isLast { + fmt.Fprintf(self.Writer, "\n") + } +} diff --git a/drive/util.go b/drive/util.go index 4103b9d4..8b3d1716 
100644 --- a/drive/util.go +++ b/drive/util.go @@ -1,7 +1,6 @@ package drive import ( - "io" "os" "fmt" "strings" @@ -126,74 +125,3 @@ func fileExists(path string) bool { func intMax() int64 { return 1 << (strconv.IntSize - 1) - 1 } - -type Progress struct { - Writer io.Writer - Reader io.Reader - Size int64 - progress int64 - rate int64 - rateProgress int64 - rateUpdated time.Time - updated time.Time - done bool -} - -func (self *Progress) Read(p []byte) (int, error) { - // Read - n, err := self.Reader.Read(p) - - now := time.Now() - isLast := err != nil - - // Increment progress - newProgress := self.progress + int64(n) - self.progress = newProgress - - // Initialize rate state - if self.rateUpdated.IsZero() { - self.rateUpdated = now - self.rateProgress = newProgress - } - - // Update rate every 3 seconds - if self.rateUpdated.Add(time.Second * 3).Before(now) { - self.rate = calcRate(newProgress - self.rateProgress, self.rateUpdated, now) - self.rateUpdated = now - self.rateProgress = newProgress - } - - // Draw progress every second - if self.updated.Add(time.Second).Before(now) || isLast { - self.Draw(isLast) - } - - // Update last draw time - self.updated = now - - // Mark as done if error occurs - self.done = isLast - - return n, err -} - -func (self *Progress) Draw(isLast bool) { - if self.done { - return - } - - // Clear line - fmt.Fprintf(self.Writer, "\r%50s", "") - - // Print progress - fmt.Fprintf(self.Writer, "\r%s/%s", formatSize(self.progress, false), formatSize(self.Size, false)) - - // Print rate - if self.rate > 0 { - fmt.Fprintf(self.Writer, ", Rate: %s/s", formatSize(self.rate, false)) - } - - if isLast { - fmt.Fprintf(self.Writer, "\n") - } -} From 06c6857d7d8837e28cdc45a33f4f69ec882c6fe2 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 12:39:16 +0100 Subject: [PATCH 047/195] Enable progress indicator --- drive/download.go | 9 ++++++--- drive/progress.go | 13 +++++++++++++ drive/revision_download.go | 9 ++++++--- drive/update.go | 13 ++++++++++--- drive/upload.go | 7 +++++-- gdrive.go | 6 ++++++ handlers_drive.go | 18 ++++++++++++++---- 7 files changed, 60 insertions(+), 15 deletions(-) diff --git a/drive/download.go b/drive/download.go index 31bcc585..e5674ad6 100644 --- a/drive/download.go +++ b/drive/download.go @@ -8,9 +8,9 @@ import ( type DownloadFileArgs struct { Out io.Writer + Progress io.Writer Id string Force bool - NoProgress bool Stdout bool } @@ -30,9 +30,12 @@ func (self *Drive) Download(args DownloadFileArgs) (err error) { // Close body on function exit defer res.Body.Close() + // Wrap response body in progress reader + srcReader := getProgressReader(res.Body, args.Progress, res.ContentLength) + if args.Stdout { // Write file content to stdout - _, err := io.Copy(os.Stdout, res.Body) + _, err := io.Copy(args.Out, srcReader) return err } @@ -51,7 +54,7 @@ func (self *Drive) Download(args DownloadFileArgs) (err error) { defer outFile.Close() // Save file to disk - bytes, err := io.Copy(outFile, res.Body) + bytes, err := io.Copy(outFile, srcReader) if err != nil { return fmt.Errorf("Failed saving file: %s", err) } diff --git a/drive/progress.go b/drive/progress.go index ee63b6c0..6187058a 100644 --- a/drive/progress.go +++ b/drive/progress.go @@ -2,10 +2,23 @@ package drive import ( "io" + "io/ioutil" "fmt" "time" ) +func getProgressReader(r io.Reader, w io.Writer, size int64) io.Reader { + if w == ioutil.Discard || size < 1024 * 1024 { + return r + } + + return &Progress{ + Reader: r, + Writer: w, + Size: size, + } +} + type 
Progress struct { Writer io.Writer Reader io.Reader diff --git a/drive/revision_download.go b/drive/revision_download.go index c26c3ce7..f06dac3e 100644 --- a/drive/revision_download.go +++ b/drive/revision_download.go @@ -8,10 +8,10 @@ import ( type DownloadRevisionArgs struct { Out io.Writer + Progress io.Writer FileId string RevisionId string Force bool - NoProgress bool Stdout bool } @@ -35,9 +35,12 @@ func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { // Close body on function exit defer res.Body.Close() + // Wrap response body in progress reader + srcReader := getProgressReader(res.Body, args.Progress, res.ContentLength) + if args.Stdout { // Write file content to stdout - _, err := io.Copy(os.Stdout, res.Body) + _, err := io.Copy(args.Out, srcReader) return err } @@ -56,7 +59,7 @@ func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { defer outFile.Close() // Save file to disk - bytes, err := io.Copy(outFile, res.Body) + bytes, err := io.Copy(outFile, srcReader) if err != nil { return fmt.Errorf("Failed saving file: %s", err) } diff --git a/drive/update.go b/drive/update.go index 4fa33eda..806b7054 100644 --- a/drive/update.go +++ b/drive/update.go @@ -6,12 +6,13 @@ import ( "os" "io" "path/filepath" + "google.golang.org/api/googleapi" "google.golang.org/api/drive/v3" - "golang.org/x/net/context" ) type UpdateArgs struct { Out io.Writer + Progress io.Writer Id string Path string Name string @@ -20,7 +21,7 @@ type UpdateArgs struct { Recursive bool Stdin bool Share bool - NoProgress bool + ChunkSize int64 } func (self *Drive) Update(args UpdateArgs) (err error) { @@ -58,7 +59,13 @@ func (self *Drive) Update(args UpdateArgs) (err error) { // Set parent folders dstFile.Parents = args.Parents - f, err := self.service.Files.Update(args.Id, dstFile).ResumableMedia(context.Background(), srcFile, srcFileInfo.Size(), dstFile.MimeType).Do() + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + + // Wrap file in progress reader + srcReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) + + f, err := self.service.Files.Update(args.Id, dstFile).Media(srcReader, chunkSize).Do() if err != nil { return fmt.Errorf("Failed to upload file: %s", err) } diff --git a/drive/upload.go b/drive/upload.go index 03cb5ad5..0c1df0c9 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -12,13 +12,13 @@ import ( type UploadFileArgs struct { Out io.Writer + Progress io.Writer Path string Name string Parents []string Mime string Recursive bool Share bool - NoProgress bool ChunkSize int64 } @@ -60,7 +60,10 @@ func (self *Drive) Upload(args UploadFileArgs) (err error) { // Chunk size option chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - f, err := self.service.Files.Create(dstFile).Media(srcFile, chunkSize).Do() + // Wrap file in progress reader + srcReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) + + f, err := self.service.Files.Create(dstFile).Media(srcReader, chunkSize).Do() if err != nil { return fmt.Errorf("Failed to upload file: %s", err) } diff --git a/gdrive.go b/gdrive.go index 8754d90a..86989111 100644 --- a/gdrive.go +++ b/gdrive.go @@ -274,6 +274,12 @@ func main() { Description: "Share file", OmitValue: true, }, + cli.IntFlag{ + Name: "chunksize", + Patterns: []string{"--chunksize"}, + Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), + DefaultValue: DefaultUploadChunkSize, + }, }, }, }, diff --git a/handlers_drive.go b/handlers_drive.go index 
8245481c..00ff7819 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -3,6 +3,8 @@ package main import ( "fmt" "os" + "io" + "io/ioutil" "./cli" "./auth" "./drive" @@ -33,7 +35,7 @@ func downloadHandler(ctx cli.Context) { Id: args.String("id"), Force: args.Bool("force"), Stdout: args.Bool("stdout"), - NoProgress: args.Bool("noProgress"), + Progress: progressWriter(args.Bool("noProgress")), }) checkErr(err) } @@ -46,7 +48,7 @@ func downloadRevisionHandler(ctx cli.Context) { RevisionId: args.String("revisionId"), Force: args.Bool("force"), Stdout: args.Bool("stdout"), - NoProgress: args.Bool("noProgress"), + Progress: progressWriter(args.Bool("noProgress")), }) checkErr(err) } @@ -55,13 +57,13 @@ func uploadHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Upload(drive.UploadFileArgs{ Out: os.Stdout, + Progress: progressWriter(args.Bool("noProgress")), Path: args.String("path"), Name: args.String("name"), Parents: args.StringSlice("parent"), Mime: args.String("mime"), Recursive: args.Bool("recursive"), Share: args.Bool("share"), - NoProgress: args.Bool("noProgress"), ChunkSize: args.Int64("chunksize"), }) checkErr(err) @@ -92,7 +94,8 @@ func updateHandler(ctx cli.Context) { Mime: args.String("mime"), Stdin: args.Bool("stdin"), Share: args.Bool("share"), - NoProgress: args.Bool("noProgress"), + Progress: progressWriter(args.Bool("noProgress")), + ChunkSize: args.Int64("chunksize"), }) checkErr(err) } @@ -230,3 +233,10 @@ func authCodePrompt(url string) func() string { return code } } + +func progressWriter(discard bool) io.Writer { + if discard { + return ioutil.Discard + } + return os.Stderr +} From 5bacd6be45dfb961c78f6e89698f8c03ca2dea43 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 12:51:53 +0100 Subject: [PATCH 048/195] Update rate every 5 seconds --- drive/progress.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drive/progress.go b/drive/progress.go index 6187058a..915526c2 100644 --- a/drive/progress.go +++ b/drive/progress.go @@ -7,7 +7,11 @@ import ( "time" ) +const MaxDrawInterval = time.Second * 1 +const MaxRateInterval = time.Second * 5 + func getProgressReader(r io.Reader, w io.Writer, size int64) io.Reader { + // Don't wrap reader if output is discarded or size is too small if w == ioutil.Discard || size < 1024 * 1024 { return r } @@ -48,15 +52,15 @@ func (self *Progress) Read(p []byte) (int, error) { self.rateProgress = newProgress } - // Update rate every 3 seconds - if self.rateUpdated.Add(time.Second * 3).Before(now) { + // Update rate every x seconds + if self.rateUpdated.Add(MaxRateInterval).Before(now) { self.rate = calcRate(newProgress - self.rateProgress, self.rateUpdated, now) self.rateUpdated = now self.rateProgress = newProgress } - // Draw progress every second - if self.updated.Add(time.Second).Before(now) || isLast { + // Draw progress every x seconds + if self.updated.Add(MaxDrawInterval).Before(now) || isLast { self.Draw(isLast) } From f40d90416c5ee30babdd3ecfbd76edcbf8fa385a Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 17:56:12 +0100 Subject: [PATCH 049/195] Clear indicator when done, 3 second rate update --- drive/progress.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/drive/progress.go b/drive/progress.go index 915526c2..05607041 100644 --- a/drive/progress.go +++ b/drive/progress.go @@ -8,7 +8,7 @@ import ( ) const MaxDrawInterval = time.Second * 1 -const MaxRateInterval = time.Second * 5 +const 
MaxRateInterval = time.Second * 3 func getProgressReader(r io.Reader, w io.Writer, size int64) io.Reader { // Don't wrap reader if output is discarded or size is too small @@ -61,7 +61,7 @@ func (self *Progress) Read(p []byte) (int, error) { // Draw progress every x seconds if self.updated.Add(MaxDrawInterval).Before(now) || isLast { - self.Draw(isLast) + self.draw(isLast) } // Update last draw time @@ -73,16 +73,15 @@ func (self *Progress) Read(p []byte) (int, error) { return n, err } -func (self *Progress) Draw(isLast bool) { +func (self *Progress) draw(isLast bool) { if self.done { return } - // Clear line - fmt.Fprintf(self.Writer, "\r%50s", "") + self.clear() // Print progress - fmt.Fprintf(self.Writer, "\r%s/%s", formatSize(self.progress, false), formatSize(self.Size, false)) + fmt.Fprintf(self.Writer, "%s/%s", formatSize(self.progress, false), formatSize(self.Size, false)) // Print rate if self.rate > 0 { @@ -90,6 +89,10 @@ func (self *Progress) Draw(isLast bool) { } if isLast { - fmt.Fprintf(self.Writer, "\n") + self.clear() } } + +func (self *Progress) clear() { + fmt.Fprintf(self.Writer, "\r%50s\r", "") +} From 9f5117df50db810a0199b92a1ac4664a65653c64 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 17:57:05 +0100 Subject: [PATCH 050/195] Add missing %s --- drive/delete.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drive/delete.go b/drive/delete.go index cc0aeb32..dc687538 100644 --- a/drive/delete.go +++ b/drive/delete.go @@ -18,7 +18,7 @@ func (self *Drive) Delete(args DeleteArgs) (err error) { err = self.service.Files.Delete(args.Id).Do() if err != nil { - return fmt.Errorf("Failed to delete file", err) + return fmt.Errorf("Failed to delete file: %s", err) } fmt.Fprintf(args.Out, "Removed file '%s'\n", f.Name) From 8ed59df16aec217d2c67a316bfb12b586a8a6d20 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 18:09:34 +0100 Subject: [PATCH 051/195] Implement recursive upload --- drive/mkdir.go | 22 ++++++++---- drive/upload.go | 87 +++++++++++++++++++++++++++++++++++++++++------ gdrive.go | 6 ++++ handlers_drive.go | 3 +- 4 files changed, 100 insertions(+), 18 deletions(-) diff --git a/drive/mkdir.go b/drive/mkdir.go index 8b3acc24..aef2276f 100644 --- a/drive/mkdir.go +++ b/drive/mkdir.go @@ -15,22 +15,32 @@ type MkdirArgs struct { Share bool } -func (self *Drive) Mkdir(args MkdirArgs) (err error) { +func (self *Drive) Mkdir(args MkdirArgs) error { + f, err := self.mkdir(args) + if err != nil { + return err + } + fmt.Printf("Directory '%s' created\n", f.Name) + return nil +} + +func (self *Drive) mkdir(args MkdirArgs) (*drive.File, error) { dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType} // Set parent folders dstFile.Parents = args.Parents - // Create folder + // Create directory f, err := self.service.Files.Create(dstFile).Do() if err != nil { - return fmt.Errorf("Failed to create folder: %s", err) + return nil, fmt.Errorf("Failed to create directory: %s", err) } - PrintFileInfo(PrintFileInfoArgs{Out: args.Out, File: f}) + fmt.Fprintf(args.Out, "\n[directory] id: %s, name: %s\n", f.Id, f.Name) //if args.Share { - // self.Share(TODO) + // self.share(TODO) //} - return + + return f, nil } diff --git a/drive/upload.go b/drive/upload.go index 0c1df0c9..b24e993c 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -5,12 +5,13 @@ import ( "mime" "os" "io" + "time" "path/filepath" "google.golang.org/api/googleapi" "google.golang.org/api/drive/v3" ) -type UploadFileArgs struct { +type UploadArgs 
struct { Out io.Writer Progress io.Writer Path string @@ -20,21 +21,81 @@ type UploadFileArgs struct { Recursive bool Share bool ChunkSize int64 + SizeInBytes bool } -func (self *Drive) Upload(args UploadFileArgs) (err error) { +func (self *Drive) Upload(args UploadArgs) error { if args.ChunkSize > intMax() - 1 { return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) } - srcFile, err := os.Open(args.Path) + return self.upload(args) +} + +func (self *Drive) upload(args UploadArgs) error { + f, err := os.Open(args.Path) if err != nil { return fmt.Errorf("Failed to open file: %s", err) } - srcFileInfo, err := srcFile.Stat() + info, err := f.Stat() + if err != nil { + return fmt.Errorf("Failed getting file metadata: %s", err) + } + + if info.IsDir() && !args.Recursive { + return fmt.Errorf("'%s' is a directory, use --recursive to upload directories", info.Name()) + } else if info.IsDir() { + args.Name = "" + return self.uploadDirectory(args) + } else { + return self.uploadFile(args) + } +} + +func (self *Drive) uploadDirectory(args UploadArgs) error { + srcFile, srcFileInfo, err := openFile(args.Path) + if err != nil { + return err + } + + // Make directory on drive + f, err := self.mkdir(MkdirArgs{ + Out: args.Out, + Name: srcFileInfo.Name(), + Parents: args.Parents, + Share: args.Share, + }) + if err != nil { + return err + } + + // Read files from directory + names, err := srcFile.Readdirnames(0) + if err != nil && err != io.EOF { + return fmt.Errorf("Failed reading directory: %s", err) + } + + for _, name := range names { + // Copy args and set new path and parents + newArgs := args + newArgs.Path = filepath.Join(args.Path, name) + newArgs.Parents = []string{f.Id} + + // Upload + err = self.upload(newArgs) + if err != nil { + return err + } + } + + return nil +} + +func (self *Drive) uploadFile(args UploadArgs) error { + srcFile, srcFileInfo, err := openFile(args.Path) if err != nil { - return fmt.Errorf("Failed to read file metadata: %s", err) + return err } // Instantiate empty drive file @@ -63,16 +124,20 @@ func (self *Drive) Upload(args UploadFileArgs) (err error) { // Wrap file in progress reader srcReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) - f, err := self.service.Files.Create(dstFile).Media(srcReader, chunkSize).Do() + fmt.Fprintf(args.Out, "\nUploading %s...\n", args.Path) + started := time.Now() + + f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Media(srcReader, chunkSize).Do() if err != nil { return fmt.Errorf("Failed to upload file: %s", err) } - fmt.Fprintf(args.Out, "Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) - //if args.Share { - // self.Share(TODO) - //} - return + // Calculate average upload rate + rate := calcRate(f.Size, started, time.Now()) + + fmt.Fprintf(args.Out, "[file] id: %s, md5: %s, name: %s\n", f.Id, f.Md5Checksum, f.Name) + fmt.Fprintf(args.Out, "Uploaded '%s' at %s/s, total %s\n", f.Name, formatSize(rate, args.SizeInBytes), formatSize(f.Size, args.SizeInBytes)) + return nil } type UploadStreamArgs struct { diff --git a/gdrive.go b/gdrive.go index 86989111..a1d49e33 100644 --- a/gdrive.go +++ b/gdrive.go @@ -193,6 +193,12 @@ func main() { Description: "Share file", OmitValue: true, }, + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Show size in bytes", + OmitValue: true, + }, cli.IntFlag{ Name: "chunksize", Patterns: []string{"--chunksize"}, diff --git a/handlers_drive.go b/handlers_drive.go 
index 00ff7819..b5be315f 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -55,7 +55,7 @@ func downloadRevisionHandler(ctx cli.Context) { func uploadHandler(ctx cli.Context) { args := ctx.Args() - err := newDrive(args).Upload(drive.UploadFileArgs{ + err := newDrive(args).Upload(drive.UploadArgs{ Out: os.Stdout, Progress: progressWriter(args.Bool("noProgress")), Path: args.String("path"), @@ -64,6 +64,7 @@ func uploadHandler(ctx cli.Context) { Mime: args.String("mime"), Recursive: args.Bool("recursive"), Share: args.Bool("share"), + SizeInBytes: args.Bool("sizeInBytes"), ChunkSize: args.Int64("chunksize"), }) checkErr(err) From c6c7b53d3b8011b75b0796096ac93d53cbe041b7 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 18:54:05 +0100 Subject: [PATCH 052/195] Add openFile function --- drive/upload.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drive/upload.go b/drive/upload.go index b24e993c..3e0f50ed 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -180,3 +180,17 @@ func (self *Drive) UploadStream(args UploadStreamArgs) (err error) { //} return } + +func openFile(path string) (*os.File, os.FileInfo, error) { + f, err := os.Open(path) + if err != nil { + return nil, nil, fmt.Errorf("Failed to open file: %s", err) + } + + info, err := f.Stat() + if err != nil { + return nil, nil, fmt.Errorf("Failed getting file metadata: %s", err) + } + + return f, info, nil +} From 460ae5f3bc3c401cd256767892829bd070b34db1 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 23:08:34 +0100 Subject: [PATCH 053/195] Remove --bytes flag from upload --- drive/upload.go | 3 +-- gdrive.go | 6 ------ handlers_drive.go | 1 - 3 files changed, 1 insertion(+), 9 deletions(-) diff --git a/drive/upload.go b/drive/upload.go index 3e0f50ed..5e70dd88 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -21,7 +21,6 @@ type UploadArgs struct { Recursive bool Share bool ChunkSize int64 - SizeInBytes bool } func (self *Drive) Upload(args UploadArgs) error { @@ -136,7 +135,7 @@ func (self *Drive) uploadFile(args UploadArgs) error { rate := calcRate(f.Size, started, time.Now()) fmt.Fprintf(args.Out, "[file] id: %s, md5: %s, name: %s\n", f.Id, f.Md5Checksum, f.Name) - fmt.Fprintf(args.Out, "Uploaded '%s' at %s/s, total %s\n", f.Name, formatSize(rate, args.SizeInBytes), formatSize(f.Size, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Uploaded '%s' at %s/s, total %s\n", f.Name, formatSize(rate, false), formatSize(f.Size, false)) return nil } diff --git a/gdrive.go b/gdrive.go index a1d49e33..86989111 100644 --- a/gdrive.go +++ b/gdrive.go @@ -193,12 +193,6 @@ func main() { Description: "Share file", OmitValue: true, }, - cli.BoolFlag{ - Name: "sizeInBytes", - Patterns: []string{"--bytes"}, - Description: "Show size in bytes", - OmitValue: true, - }, cli.IntFlag{ Name: "chunksize", Patterns: []string{"--chunksize"}, diff --git a/handlers_drive.go b/handlers_drive.go index b5be315f..04075f4e 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -64,7 +64,6 @@ func uploadHandler(ctx cli.Context) { Mime: args.String("mime"), Recursive: args.Bool("recursive"), Share: args.Bool("share"), - SizeInBytes: args.Bool("sizeInBytes"), ChunkSize: args.Int64("chunksize"), }) checkErr(err) From 04a48c8ea2dbf9a3aaaa845f6cec3b5103dcf12a Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 23:09:53 +0100 Subject: [PATCH 054/195] Implement recursive download --- drive/download.go | 91 +++++++++++++++++++++++++++++++++++++++++------ drive/util.go | 9 +++++ 
gdrive.go | 11 ++++++ handlers_drive.go | 4 ++- 4 files changed, 104 insertions(+), 11 deletions(-) diff --git a/drive/download.go b/drive/download.go index e5674ad6..9434ca6e 100644 --- a/drive/download.go +++ b/drive/download.go @@ -4,25 +4,46 @@ import ( "fmt" "io" "os" + "time" + "path/filepath" + "google.golang.org/api/drive/v3" ) -type DownloadFileArgs struct { +type DownloadArgs struct { Out io.Writer Progress io.Writer Id string + Path string Force bool + Recursive bool Stdout bool } -func (self *Drive) Download(args DownloadFileArgs) (err error) { - getFile := self.service.Files.Get(args.Id) +func (self *Drive) Download(args DownloadArgs) error { + return self.download(args) +} - f, err := getFile.Do() +func (self *Drive) download(args DownloadArgs) error { + f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do() if err != nil { return fmt.Errorf("Failed to get file: %s", err) } - res, err := getFile.Download() + if isDir(f) && !args.Recursive { + return fmt.Errorf("'%s' is a directory, use --recursive to download directories", f.Name) + } else if isDir(f) && args.Recursive { + return self.downloadDirectory(f, args) + } else if isBinary(f) { + return self.downloadBinary(f, args) + } else if !args.Recursive { + return fmt.Errorf("'%s' is a google document and must be exported, see the export command", f.Name) + } + + return nil +} + +func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) error { + res, err := self.service.Files.Get(f.Id).Download() if err != nil { return fmt.Errorf("Failed to download file: %s", err) } @@ -39,13 +60,20 @@ func (self *Drive) Download(args DownloadFileArgs) (err error) { return err } + filename := filepath.Join(args.Path, f.Name) + // Check if file exists - if !args.Force && fileExists(f.Name) { - return fmt.Errorf("File '%s' already exists, use --force to overwrite", f.Name) + if !args.Force && fileExists(filename) { + return fmt.Errorf("File '%s' already exists, use --force to overwrite", filename) + } + + // Ensure any parent directories exists + if err = mkdir(filename); err != nil { + return err } // Create new file - outFile, err := os.Create(f.Name) + outFile, err := os.Create(filename) if err != nil { return fmt.Errorf("Unable to create new file: %s", err) } @@ -53,16 +81,59 @@ func (self *Drive) Download(args DownloadFileArgs) (err error) { // Close file on function exit defer outFile.Close() + fmt.Fprintf(args.Out, "\nDownloading %s...\n", f.Name) + started := time.Now() + // Save file to disk bytes, err := io.Copy(outFile, srcReader) if err != nil { return fmt.Errorf("Failed saving file: %s", err) } - fmt.Fprintf(args.Out, "Downloaded '%s' at %s, total %d\n", f.Name, "x/s", bytes) + // Calculate average download rate + rate := calcRate(f.Size, started, time.Now()) + + fmt.Fprintf(args.Out, "Downloaded '%s' at %s/s, total %s\n", filename, formatSize(rate, false), formatSize(bytes, false)) //if deleteSourceFile { // self.Delete(args.Id) //} - return + return nil +} + +func (self *Drive) downloadDirectory(parent *drive.File, args DownloadArgs) error { + query := fmt.Sprintf("'%s' in parents", parent.Id) + fileList, err := self.service.Files.List().Q(query).Fields("files(id,name)").Do() + if err != nil { + return fmt.Errorf("Failed listing files: %s", err) + } + + // Update download path + path := filepath.Join(args.Path, parent.Name) + + for _, f := range fileList.Files { + err = self.download(DownloadArgs{ + Out: args.Out, + Id: f.Id, + Progress: args.Progress, + Force: args.Force, 
+ Path: path, + Recursive: args.Recursive, + Stdout: false, + }) + + if err != nil { + return err + } + } + + return nil +} + +func isDir(f *drive.File) bool { + return f.MimeType == DirectoryMimeType +} + +func isBinary(f *drive.File) bool { + return f.Md5Checksum != "" } diff --git a/drive/util.go b/drive/util.go index 8b3d1716..f4922862 100644 --- a/drive/util.go +++ b/drive/util.go @@ -3,6 +3,7 @@ package drive import ( "os" "fmt" + "path/filepath" "strings" "strconv" "unicode/utf8" @@ -122,6 +123,14 @@ func fileExists(path string) bool { return false } +func mkdir(path string) error { + dir := filepath.Dir(path) + if fileExists(dir) { + return nil + } + return os.MkdirAll(dir, 0775) +} + func intMax() int64 { return 1 << (strconv.IntSize - 1) - 1 } diff --git a/gdrive.go b/gdrive.go index 86989111..71e4a7bc 100644 --- a/gdrive.go +++ b/gdrive.go @@ -110,6 +110,17 @@ func main() { Description: "Overwrite existing file", OmitValue: true, }, + cli.StringFlag{ + Name: "path", + Patterns: []string{"--path"}, + Description: "Download path", + }, + cli.BoolFlag{ + Name: "recursive", + Patterns: []string{"-r", "--recursive"}, + Description: "Download directory recursively, documents will be skipped", + OmitValue: true, + }, cli.BoolFlag{ Name: "noProgress", Patterns: []string{"--no-progress"}, diff --git a/handlers_drive.go b/handlers_drive.go index 04075f4e..38c1e1c7 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -30,10 +30,12 @@ func listHandler(ctx cli.Context) { func downloadHandler(ctx cli.Context) { args := ctx.Args() - err := newDrive(args).Download(drive.DownloadFileArgs{ + err := newDrive(args).Download(drive.DownloadArgs{ Out: os.Stdout, Id: args.String("id"), Force: args.Bool("force"), + Path: args.String("path"), + Recursive: args.Bool("recursive"), Stdout: args.Bool("stdout"), Progress: progressWriter(args.Bool("noProgress")), }) From 625d65cd145a1c392a7c5a7968d738d8dc55c75b Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 23:19:42 +0100 Subject: [PATCH 055/195] Copy and update given args --- drive/download.go | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/drive/download.go b/drive/download.go index 9434ca6e..468f96f3 100644 --- a/drive/download.go +++ b/drive/download.go @@ -108,20 +108,16 @@ func (self *Drive) downloadDirectory(parent *drive.File, args DownloadArgs) erro return fmt.Errorf("Failed listing files: %s", err) } - // Update download path - path := filepath.Join(args.Path, parent.Name) + newPath := filepath.Join(args.Path, parent.Name) for _, f := range fileList.Files { - err = self.download(DownloadArgs{ - Out: args.Out, - Id: f.Id, - Progress: args.Progress, - Force: args.Force, - Path: path, - Recursive: args.Recursive, - Stdout: false, - }) + // Copy args and update changed fields + newArgs := args + newArgs.Path = newPath + newArgs.Id = f.Id + newArgs.Stdout = false + err = self.download(newArgs) if err != nil { return err } From 11f577b5dbe0429f206c80b674d666d44b3a79c2 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 23:20:37 +0100 Subject: [PATCH 056/195] Change order of flags --- gdrive.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/gdrive.go b/gdrive.go index 71e4a7bc..7d73dce4 100644 --- a/gdrive.go +++ b/gdrive.go @@ -110,17 +110,17 @@ func main() { Description: "Overwrite existing file", OmitValue: true, }, - cli.StringFlag{ - Name: "path", - Patterns: []string{"--path"}, - Description: "Download path", - }, cli.BoolFlag{ Name: 
"recursive", Patterns: []string{"-r", "--recursive"}, Description: "Download directory recursively, documents will be skipped", OmitValue: true, }, + cli.StringFlag{ + Name: "path", + Patterns: []string{"--path"}, + Description: "Download path", + }, cli.BoolFlag{ Name: "noProgress", Patterns: []string{"--no-progress"}, From 3a669be42beacbde675088f4be12a7a9b078acd7 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 24 Jan 2016 23:28:55 +0100 Subject: [PATCH 057/195] Fix progress update --- drive/progress.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drive/progress.go b/drive/progress.go index 05607041..9d4eb5a7 100644 --- a/drive/progress.go +++ b/drive/progress.go @@ -62,11 +62,9 @@ func (self *Progress) Read(p []byte) (int, error) { // Draw progress every x seconds if self.updated.Add(MaxDrawInterval).Before(now) || isLast { self.draw(isLast) + self.updated = now } - // Update last draw time - self.updated = now - // Mark as done if error occurs self.done = isLast From 35bbe302d53346426541f63f134cb02a11c50e78 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 25 Jan 2016 22:34:58 +0100 Subject: [PATCH 058/195] Implement import --- drive/import.go | 57 +++++++++++++++++++++++++++++++++++++++++++++++ drive/upload.go | 11 ++++----- gdrive.go | 27 ++++++++++++++++++++++ handlers_drive.go | 12 ++++++++++ 4 files changed, 102 insertions(+), 5 deletions(-) create mode 100644 drive/import.go diff --git a/drive/import.go b/drive/import.go new file mode 100644 index 00000000..cb82508f --- /dev/null +++ b/drive/import.go @@ -0,0 +1,57 @@ +package drive + +import ( + "io" + "io/ioutil" + "fmt" + "strings" + "mime" + "path/filepath" +) + +type ImportArgs struct { + Out io.Writer + Progress io.Writer + Path string + Share bool + Parents []string +} + +func (self *Drive) Import(args ImportArgs) error { + fromMime := getMimeType(args.Path) + if fromMime == "" { + return fmt.Errorf("Could not determine mime type of file") + } + + about, err := self.service.About.Get().Fields("importFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + + toMimes, ok := about.ImportFormats[fromMime] + if !ok || len(toMimes) == 0 { + return fmt.Errorf("Mime type '%s' is not supported for import", fromMime) + } + + f, err := self.uploadFile(UploadArgs{ + Out: ioutil.Discard, + Progress: args.Progress, + Path: args.Path, + Parents: args.Parents, + Mime: toMimes[0], + Share: args.Share, + }) + if err != nil { + return err + } + + fmt.Fprintf(args.Out, "[document] id: %s, name: %s\n", f.Id, f.Name) + fmt.Fprintf(args.Out, "Imported %s with mime type: '%s'\n", args.Path, toMimes[0]) + + return nil +} + +func getMimeType(path string) string { + t := mime.TypeByExtension(filepath.Ext(path)) + return strings.Split(t, ";")[0] +} diff --git a/drive/upload.go b/drive/upload.go index 5e70dd88..1e145f72 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -48,7 +48,8 @@ func (self *Drive) upload(args UploadArgs) error { args.Name = "" return self.uploadDirectory(args) } else { - return self.uploadFile(args) + _, err := self.uploadFile(args) + return err } } @@ -91,10 +92,10 @@ func (self *Drive) uploadDirectory(args UploadArgs) error { return nil } -func (self *Drive) uploadFile(args UploadArgs) error { +func (self *Drive) uploadFile(args UploadArgs) (*drive.File, error) { srcFile, srcFileInfo, err := openFile(args.Path) if err != nil { - return err + return nil, err } // Instantiate empty drive file @@ -128,7 +129,7 @@ func (self *Drive) uploadFile(args 
UploadArgs) error { f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Media(srcReader, chunkSize).Do() if err != nil { - return fmt.Errorf("Failed to upload file: %s", err) + return nil, fmt.Errorf("Failed to upload file: %s", err) } // Calculate average upload rate @@ -136,7 +137,7 @@ func (self *Drive) uploadFile(args UploadArgs) error { fmt.Fprintf(args.Out, "[file] id: %s, md5: %s, name: %s\n", f.Id, f.Md5Checksum, f.Name) fmt.Fprintf(args.Out, "Uploaded '%s' at %s/s, total %s\n", f.Name, formatSize(rate, false), formatSize(f.Size, false)) - return nil + return f, nil } type UploadStreamArgs struct { diff --git a/gdrive.go b/gdrive.go index 7d73dce4..754de48b 100644 --- a/gdrive.go +++ b/gdrive.go @@ -310,6 +310,33 @@ func main() { }, }, }, + &cli.Handler{ + Pattern: "[global] import [options] ", + Description: "Upload and convert file to a google document, see 'about import' for available conversions", + Callback: importHandler, + Flags: cli.Flags{ + "global": globalFlags, + "options": []cli.Flag{ + cli.StringSliceFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "share", + Patterns: []string{"--share"}, + Description: "Share file", + OmitValue: true, + }, + }, + }, + }, &cli.Handler{ Pattern: "[global] export [options] ", Description: "Export a google document", diff --git a/handlers_drive.go b/handlers_drive.go index 38c1e1c7..a8303ede 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -112,6 +112,18 @@ func infoHandler(ctx cli.Context) { checkErr(err) } +func importHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).Import(drive.ImportArgs{ + Out: os.Stdout, + Path: args.String("path"), + Parents: args.StringSlice("parent"), + Share: args.Bool("share"), + Progress: progressWriter(args.Bool("noProgress")), + }) + checkErr(err) +} + func exportHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Export(drive.ExportArgs{ From d52aa78ffadf6f4ef2943af207b8a21ef88881ee Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 27 Jan 2016 20:52:07 +0100 Subject: [PATCH 059/195] Add file type column --- drive/list.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/drive/list.go b/drive/list.go index 5649a124..20dcda38 100644 --- a/drive/list.go +++ b/drive/list.go @@ -17,7 +17,7 @@ type ListFilesArgs struct { } func (self *Drive) List(args ListFilesArgs) (err error) { - fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).Fields("nextPageToken", "files(id,name,size,createdTime)").Do() + fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).Fields("files(id,name,md5Checksum,mimeType,size,createdTime)").Do() if err != nil { return fmt.Errorf("Failed listing files: %s", err) } @@ -46,13 +46,14 @@ func PrintFileList(args PrintFileListArgs) { w.Init(args.Out, 0, 0, 3, ' ', 0) if !args.SkipHeader { - fmt.Fprintln(w, "Id\tName\tSize\tCreated") + fmt.Fprintln(w, "Id\tName\tType\tSize\tCreated") } for _, f := range args.Files { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", f.Id, truncateString(f.Name, args.NameWidth), + filetype(f), formatSize(f.Size, args.SizeInBytes), formatDatetime(f.CreatedTime), 
) @@ -60,3 +61,12 @@ func PrintFileList(args PrintFileListArgs) { w.Flush() } + +func filetype(f *drive.File) string { + if isDir(f) { + return "dir" + } else if isBinary(f) { + return "bin" + } + return "doc" +} From fa5ed8b1c52f435067ec39959305ebc4e74ac609 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 27 Jan 2016 21:39:21 +0100 Subject: [PATCH 060/195] Add sort order support in listing --- drive/list.go | 3 ++- gdrive.go | 5 +++++ handlers_drive.go | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/drive/list.go b/drive/list.go index 20dcda38..d3d8f3d0 100644 --- a/drive/list.go +++ b/drive/list.go @@ -12,12 +12,13 @@ type ListFilesArgs struct { MaxFiles int64 NameWidth int64 Query string + SortOrder string SkipHeader bool SizeInBytes bool } func (self *Drive) List(args ListFilesArgs) (err error) { - fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).Fields("files(id,name,md5Checksum,mimeType,size,createdTime)").Do() + fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).OrderBy(args.SortOrder).Fields("files(id,name,md5Checksum,mimeType,size,createdTime)").Do() if err != nil { return fmt.Errorf("Failed listing files: %s", err) } diff --git a/gdrive.go b/gdrive.go index 754de48b..2d25f3d9 100644 --- a/gdrive.go +++ b/gdrive.go @@ -48,6 +48,11 @@ func main() { Description: fmt.Sprintf(`Default query: "%s". See https://developers.google.com/drive/search-parameters`, DefaultQuery), DefaultValue: DefaultQuery, }, + cli.StringFlag{ + Name: "sortOrder", + Patterns: []string{"--order"}, + Description: "Sort order. See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy", + }, cli.IntFlag{ Name: "nameWidth", Patterns: []string{"--name-width"}, diff --git a/handlers_drive.go b/handlers_drive.go index a8303ede..f17338bd 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -22,6 +22,7 @@ func listHandler(ctx cli.Context) { MaxFiles: args.Int64("maxFiles"), NameWidth: args.Int64("nameWidth"), Query: args.String("query"), + SortOrder: args.String("sortOrder"), SkipHeader: args.Bool("skipHeader"), SizeInBytes: args.Bool("sizeInBytes"), }) From 1c5e8879a7e939f8799aca72f7c6d3b0b8c44144 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Thu, 28 Jan 2016 22:20:16 +0100 Subject: [PATCH 061/195] Implement list changes --- drive/changes.go | 103 ++++++++++++++++++++++++++++++++++++++++++++++ gdrive.go | 41 ++++++++++++++++++ handlers_drive.go | 13 ++++++ 3 files changed, 157 insertions(+) create mode 100644 drive/changes.go diff --git a/drive/changes.go b/drive/changes.go new file mode 100644 index 00000000..1d9a89dc --- /dev/null +++ b/drive/changes.go @@ -0,0 +1,103 @@ +package drive + +import ( + "fmt" + "io" + "text/tabwriter" + "google.golang.org/api/drive/v3" +) + +type ListChangesArgs struct { + Out io.Writer + PageToken string + MaxChanges int64 + Now bool + NameWidth int64 + SkipHeader bool +} + +func (self *Drive) ListChanges(args ListChangesArgs) error { + if args.Now { + pageToken, err := self.GetChangesStartPageToken() + if err != nil { + return err + } + + fmt.Fprintf(args.Out, "Page token: %s\n", pageToken) + return nil + } + + changeList, err := self.service.Changes.List(args.PageToken).PageSize(args.MaxChanges).RestrictToMyDrive(true).Fields("newStartPageToken", "nextPageToken", "changes(fileId,removed,time,file(id,name,md5Checksum,mimeType,createdTime,modifiedTime))").Do() + if err != nil { + return fmt.Errorf("Failed listing changes: %s", err) + } + + 
PrintChanges(PrintChangesArgs{ + Out: args.Out, + ChangeList: changeList, + NameWidth: int(args.NameWidth), + SkipHeader: args.SkipHeader, + }) + + return nil +} + +func (self *Drive) GetChangesStartPageToken() (string, error) { + res, err := self.service.Changes.GetStartPageToken().Do() + if err != nil { + return "", fmt.Errorf("Failed getting start page token: %s", err) + } + + return res.StartPageToken, nil +} + +type PrintChangesArgs struct { + Out io.Writer + ChangeList *drive.ChangeList + NameWidth int + SkipHeader bool +} + +func PrintChanges(args PrintChangesArgs) { + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tAction\tTime") + } + + for _, c := range args.ChangeList.Changes { + var name string + var action string + + if c.Removed { + action = "remove" + } else { + name = c.File.Name + action = "update" + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", + c.FileId, + truncateString(name, args.NameWidth), + action, + formatDatetime(c.Time), + ) + } + + if len(args.ChangeList.Changes) > 0 { + w.Flush() + pageToken, hasMore := nextChangesPageToken(args.ChangeList) + fmt.Fprintf(args.Out, "\nToken: %s, more: %t\n", pageToken, hasMore) + } else { + fmt.Fprintln(args.Out, "No changes") + } +} + +func nextChangesPageToken(cl *drive.ChangeList) (string, bool) { + if cl.NextPageToken != "" { + return cl.NextPageToken, true + } + + return cl.NewStartPageToken, false +} diff --git a/gdrive.go b/gdrive.go index 2d25f3d9..a675ca10 100644 --- a/gdrive.go +++ b/gdrive.go @@ -10,6 +10,7 @@ const Name = "gdrive" const Version = "2.0.0" const DefaultMaxFiles = 30 +const DefaultMaxChanges = 100 const DefaultNameWidth = 40 const DefaultUploadChunkSize = 8 * 1024 * 1024 const DefaultQuery = "trashed = false and 'me' in owners" @@ -74,6 +75,46 @@ func main() { }, }, }, + &cli.Handler{ + Pattern: "[global] list changes [options]", + Description: "List file changes", + Callback: listChangesHandler, + Flags: cli.Flags{ + "global": globalFlags, + "options": []cli.Flag{ + cli.IntFlag{ + Name: "maxChanges", + Patterns: []string{"-m", "--max"}, + Description: fmt.Sprintf("Max changes to list, default: %d", DefaultMaxChanges), + DefaultValue: DefaultMaxChanges, + }, + cli.StringFlag{ + Name: "pageToken", + Patterns: []string{"--since"}, + Description: fmt.Sprintf("Page token to start listing changes from"), + DefaultValue: "1", + }, + cli.BoolFlag{ + Name: "now", + Patterns: []string{"--now"}, + Description: fmt.Sprintf("Get latest page token"), + OmitValue: true, + }, + cli.IntFlag{ + Name: "nameWidth", + Patterns: []string{"--name-width"}, + Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), + DefaultValue: DefaultNameWidth, + }, + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--no-header"}, + Description: "Dont print the header", + OmitValue: true, + }, + }, + }, + }, &cli.Handler{ Pattern: "[global] list revisions [options] ", Description: "List file revisions", diff --git a/handlers_drive.go b/handlers_drive.go index f17338bd..297b9f40 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -29,6 +29,19 @@ func listHandler(ctx cli.Context) { checkErr(err) } +func listChangesHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).ListChanges(drive.ListChangesArgs{ + Out: os.Stdout, + PageToken: args.String("pageToken"), + MaxChanges: args.Int64("maxChanges"), + Now: args.Bool("now"), + NameWidth: args.Int64("nameWidth"), + SkipHeader: 
args.Bool("skipHeader"), + }) + checkErr(err) +} + func downloadHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Download(drive.DownloadArgs{ From dcb2010e420789de488fd82b850d6a996646b428 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 30 Jan 2016 23:07:07 +0100 Subject: [PATCH 062/195] Initial upload sync implementation --- drive/upload_sync.go | 480 +++++++++++++++++++++++++++++++++++++++++++ drive/util.go | 23 +++ gdrive.go | 33 +++ handlers_drive.go | 13 ++ 4 files changed, 549 insertions(+) create mode 100644 drive/upload_sync.go diff --git a/drive/upload_sync.go b/drive/upload_sync.go new file mode 100644 index 00000000..26b22b4f --- /dev/null +++ b/drive/upload_sync.go @@ -0,0 +1,480 @@ +package drive + +import ( + "fmt" + "io" + "os" + "sort" + "path/filepath" + "github.com/gyuho/goraph/graph" + "google.golang.org/api/googleapi" + "google.golang.org/api/drive/v3" +) + +type UploadSyncArgs struct { + Out io.Writer + Progress io.Writer + Path string + Parent string + DeleteRemote bool + ChunkSize int64 +} + +func (self *Drive) UploadSync(args UploadSyncArgs) error { + if args.ChunkSize > intMax() - 1 { + return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) + } + + rootDir, created, err := self.getOrCreateSyncRootDir(args) + if err != nil { + return err + } + + if created { + fmt.Fprintln(args.Out, "Did not find any existing files, starting from scratch") + } else { + fmt.Fprintln(args.Out, "Found existing root directory, let's see whats changed") + } + + // TODO: do concurrently + fmt.Println("preparing local") + localFiles, err := prepareLocalFiles(args.Path) + if err != nil { + return err + } + + fmt.Println("preparing remote") + remoteFiles, err := self.prepareRemoteFiles(rootDir) + if err != nil { + return err + } + + files := &syncFiles{ + root: &remoteFile{file: rootDir}, + local: localFiles, + remote: remoteFiles, + } + + // Create missing directories + files, err = self.createMissingRemoteDirs(files) + if err != nil { + return err + } + + // Upload missing files + err = self.uploadMissingFiles(files, args) + if err != nil { + return err + } + + // Update modified files + err = self.updateChangedFiles(files, args) + if err != nil { + return err + } + + return nil +} + +func (self *Drive) getOrCreateSyncRootDir(args UploadSyncArgs) (*drive.File, bool, error) { + // Root dir name + name := filepath.Base(args.Path) + + // Build root dir query + query := fmt.Sprintf("name = '%s' and appProperties has {key='isSyncRoot' and value='true'}", name) + if args.Parent != "" { + query += fmt.Sprintf(" and '%s' in parents", args.Parent) + } + + // Find root dir + fileList, err := self.service.Files.List().Q(query).Fields("files(id,name,mimeType)").Do() + if err != nil { + return nil, false, fmt.Errorf("Failed listing files: %s", err) + } + + // More than one root dir found + if len(fileList.Files) > 1 { + return nil, false, fmt.Errorf("More than one root directories found, aborting...") + } + + // Root dir found, return + if len(fileList.Files) == 1 { + return fileList.Files[0], false, nil + } + + // Root dir not found, create new + dstFile := &drive.File{ + Name: name, + MimeType: DirectoryMimeType, + AppProperties: map[string]string{"isSyncRoot": "true"}, + } + + // Add parent if provided + if args.Parent != "" { + dstFile.Parents = []string{args.Parent} + } + + // Create directory + f, err := self.service.Files.Create(dstFile).Do() + if err != nil { + return nil, false, fmt.Errorf("Failed to create directory: %s", 
err) + } + + return f, true, nil +} + +func (self *Drive) createMissingRemoteDirs(files *syncFiles) (*syncFiles, error) { + missingDirs := files.filterMissingRemoteDirs() + + // Sort directories so that the dirs with the shortest path comes first + sort.Sort(byPathLength(missingDirs)) + + for _, lf := range missingDirs { + parentPath := parentFilePath(lf.relPath) + parent, ok := files.findRemoteByPath(parentPath) + if !ok { + return nil, fmt.Errorf("Could not find remote directory with path '%s', aborting...", parentPath) + } + + dstFile := &drive.File{ + Name: lf.info.Name(), + MimeType: DirectoryMimeType, + Parents: []string{parent.file.Id}, + AppProperties: map[string]string{"syncRootId": files.root.file.Id}, + } + + fmt.Printf("Creating directory: %s\n", filepath.Join(files.root.file.Name, lf.relPath)) + + f, err := self.service.Files.Create(dstFile).Do() + if err != nil { + return nil, fmt.Errorf("Failed to create directory: %s", err) + } + + files.remote = append(files.remote, &remoteFile{ + relPath: lf.relPath, + file: f, + }) + } + + return files, nil +} + +func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) error { + for _, lf := range files.filterMissingRemoteFiles() { + parentPath := parentFilePath(lf.relPath) + parent, ok := files.findRemoteByPath(parentPath) + if !ok { + return fmt.Errorf("Could not find remote directory with path '%s', aborting...", parentPath) + } + + newArgs := args + newArgs.Path = lf.absPath + newArgs.Parent = parent.file.Id + + fmt.Printf("%s -> %s\n", lf.absPath, filepath.Join(files.root.file.Name, lf.relPath)) + err := self.uploadMissingFile(files.root.file.Id, lf, newArgs) + if err != nil { + return err + } + } + + return nil +} + +func (self *Drive) updateChangedFiles(files *syncFiles, args UploadSyncArgs) error { + for _, cf := range files.filterChangedLocalFiles() { + fmt.Println(cf.local.absPath) + + fmt.Printf("Updating %s -> %s\n", cf.local.absPath, filepath.Join(files.root.file.Name, cf.local.relPath)) + err := self.updateChangedFile(cf, args) + if err != nil { + return err + } + } + + return nil +} + +func (self *Drive) uploadMissingFile(rootId string, lf *localFile, args UploadSyncArgs) error { + srcFile, err := os.Open(lf.absPath) + if err != nil { + return fmt.Errorf("Failed to open file: %s", err) + } + + // Instantiate drive file + dstFile := &drive.File{ + Name: lf.info.Name(), + Parents: []string{args.Parent}, + AppProperties: map[string]string{"syncRootId": rootId}, + } + + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + + // Wrap file in progress reader + srcReader := getProgressReader(srcFile, args.Progress, lf.info.Size()) + + _, err = self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Media(srcReader, chunkSize).Do() + if err != nil { + return fmt.Errorf("Failed to upload file: %s", err) + } + + return nil +} + +func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs) error { + srcFile, err := os.Open(cf.local.absPath) + if err != nil { + return fmt.Errorf("Failed to open file: %s", err) + } + + // Instantiate drive file + dstFile := &drive.File{} + + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + + // Wrap file in progress reader + srcReader := getProgressReader(srcFile, args.Progress, cf.local.info.Size()) + + _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Media(srcReader, chunkSize).Do() + if err != nil { + return fmt.Errorf("Failed to update file: %s", err) + } + + return nil +} + 
+func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*remoteFile, error) { + // Find all files which has rootDir as root + query := fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id) + fileList, err := self.service.Files.List().Q(query).Fields("files(id,name,parents,md5Checksum,mimeType)").Do() + if err != nil { + return nil, fmt.Errorf("Failed listing files: %s", err) + } + + if err := checkFiles(fileList.Files); err != nil { + return nil, err + } + + relPaths, err := prepareRemoteRelPaths(rootDir.Id, fileList.Files) + if err != nil { + return nil, err + } + + var remoteFiles []*remoteFile + for _, f := range fileList.Files { + relPath, ok := relPaths[f.Id] + if !ok { + return nil, fmt.Errorf("File %s does not have a valid parent, aborting...", f.Id) + } + remoteFiles = append(remoteFiles, &remoteFile{ + relPath: relPath, + file: f, + }) + } + + return remoteFiles, nil +} + +func checkFiles(files []*drive.File) error { + uniq := map[string]string{} + + for _, f := range files { + // Ensure all files have exactly one parent + if len(f.Parents) != 1 { + return fmt.Errorf("File %s does not have exacly one parent, aborting...", f.Id) + } + + // Ensure that there are no duplicate files + uniqKey := f.Name + f.Parents[0] + if dupeId, isDupe := uniq[uniqKey]; isDupe { + return fmt.Errorf("Found name collision between %s and %s, aborting", f.Id, dupeId) + } + uniq[uniqKey] = f.Id + } + + return nil +} + +func prepareRemoteRelPaths(rootId string, files []*drive.File) (map[string]string, error) { + names := map[string]string{} + idGraph := graph.NewDefaultGraph() + + for _, f := range files { + // Store directory name for quick lookup + names[f.Id] = f.Name + + // Store path between parent and child folder + idGraph.AddVertex(f.Id) + idGraph.AddVertex(f.Parents[0]) + idGraph.AddEdge(f.Parents[0], f.Id, 0) + } + + paths := map[string]string{} + + for _, f := range files { + // Find path from root to directory + pathIds, _, err := graph.Dijkstra(idGraph, rootId, f.Id) + if err != nil { + return nil, err + } + + // Convert path ids to path names + var pathNames []string + for _, id := range pathIds { + pathNames = append(pathNames, names[id]) + } + + // Store relative file path from root to directory + paths[f.Id] = filepath.Join(pathNames...) 
+ } + + return paths, nil +} + +type localFile struct { + absPath string + relPath string + info os.FileInfo +} + +type remoteFile struct { + relPath string + file *drive.File +} + +type changedFile struct { + local *localFile + remote *remoteFile +} + +func prepareLocalFiles(root string) ([]*localFile, error) { + var files []*localFile + + // Get absolute root path + absRootPath, err := filepath.Abs(root) + if err != nil { + return nil, err + } + + err = filepath.Walk(absRootPath, func(absPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip root directory + if absPath == absRootPath { + return nil + } + + relPath, err := filepath.Rel(absRootPath, absPath) + if err != nil { + return err + } + + files = append(files, &localFile{ + absPath: absPath, + relPath: relPath, + info: info, + }) + + return nil + }) + + if err != nil { + return nil, fmt.Errorf("Failed to prepare local files: %s", err) + } + + return files, err +} + +type syncFiles struct { + root *remoteFile + local []*localFile + remote []*remoteFile +} + +func (self *syncFiles) filterMissingRemoteDirs() []*localFile { + var files []*localFile + + for _, f := range self.local { + if f.info.IsDir() && !self.existsRemote(f) { + files = append(files, f) + } + } + + return files +} + +func (self *syncFiles) filterMissingRemoteFiles() []*localFile { + var files []*localFile + + for _, f := range self.local { + if !f.info.IsDir() && !self.existsRemote(f) { + files = append(files, f) + } + } + + return files +} + +func (self *syncFiles) filterChangedLocalFiles() []*changedFile { + var files []*changedFile + + for _, lf := range self.local { + // Skip directories + if lf.info.IsDir() { + continue + } + + // Skip files that don't exist on drive + rf, found := self.findRemoteByPath(lf.relPath) + if !found { + continue + } + + // Add files where remote md5 sum does not match local + if rf.file.Md5Checksum != md5sum(lf.absPath) { + files = append(files, &changedFile{ + local: lf, + remote: rf, + }) + } + } + + return files +} + +func (self *syncFiles) existsRemote(lf *localFile) bool { + _, found := self.findRemoteByPath(lf.relPath) + return found +} + +func (self *syncFiles) findRemoteByPath(relPath string) (*remoteFile, bool) { + if relPath == "." 
{ + return self.root, true + } + + for _, rf := range self.remote { + if relPath == rf.relPath { + return rf, true + } + } + + return nil, false +} + +type byPathLength []*localFile + +func (self byPathLength) Len() int { + return len(self) +} + +func (self byPathLength) Swap(i, j int) { + self[i], self[j] = self[j], self[i] +} + +func (self byPathLength) Less(i, j int) bool { + return pathLength(self[i].relPath) < pathLength(self[j].relPath) +} diff --git a/drive/util.go b/drive/util.go index f4922862..1c430095 100644 --- a/drive/util.go +++ b/drive/util.go @@ -9,6 +9,8 @@ import ( "unicode/utf8" "math" "time" + "crypto/md5" + "io" ) type kv struct { @@ -134,3 +136,24 @@ func mkdir(path string) error { func intMax() int64 { return 1 << (strconv.IntSize - 1) - 1 } + +func md5sum(path string) string { + h := md5.New() + f, err := os.Open(path) + if err != nil { + return "" + } + defer f.Close() + + io.Copy(h, f) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func pathLength(path string) int { + return strings.Count(path, string(os.PathSeparator)) +} + +func parentFilePath(path string) string { + dir, _ := filepath.Split(path) + return filepath.Dir(dir) +} diff --git a/gdrive.go b/gdrive.go index a675ca10..562045bd 100644 --- a/gdrive.go +++ b/gdrive.go @@ -291,6 +291,39 @@ func main() { }, }, }, + &cli.Handler{ + Pattern: "[global] upload sync [options] ", + Description: "Sync local directory to drive", + Callback: uploadSyncHandler, + Flags: cli.Flags{ + "global": globalFlags, + "options": []cli.Flag{ + cli.StringFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id", + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "deleteRemote", + Patterns: []string{"--delete-remote"}, + Description: "Delete extraneous files from drive", + OmitValue: true, + }, + cli.IntFlag{ + Name: "chunksize", + Patterns: []string{"--chunksize"}, + Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), + DefaultValue: DefaultUploadChunkSize, + }, + }, + }, + }, &cli.Handler{ Pattern: "[global] update [options] ", Description: "Update file, this creates a new revision of the file", diff --git a/handlers_drive.go b/handlers_drive.go index 297b9f40..75da5938 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -99,6 +99,19 @@ func uploadStdinHandler(ctx cli.Context) { checkErr(err) } +func uploadSyncHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).UploadSync(drive.UploadSyncArgs{ + Out: os.Stdout, + Progress: progressWriter(args.Bool("noProgress")), + Path: args.String("path"), + Parent: args.String("parent"), + DeleteRemote: args.Bool("deleteRemote"), + ChunkSize: args.Int64("chunksize"), + }) + checkErr(err) +} + func updateHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Update(drive.UpdateArgs{ From 3b8a364369c61cc307b84ddcaf52412324999fe9 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 31 Jan 2016 01:05:22 +0100 Subject: [PATCH 063/195] Prepare sync files async --- drive/upload_sync.go | 48 ++++++++++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/drive/upload_sync.go b/drive/upload_sync.go index 26b22b4f..c7cc8e0e 100644 --- a/drive/upload_sync.go +++ b/drive/upload_sync.go @@ -36,25 +36,12 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { fmt.Fprintln(args.Out, "Found existing root directory, let's see whats 
changed") } - // TODO: do concurrently - fmt.Println("preparing local") - localFiles, err := prepareLocalFiles(args.Path) + // Collect information about local and remote files + files, err := self.prepareSyncFiles(args.Path, rootDir) if err != nil { return err } - fmt.Println("preparing remote") - remoteFiles, err := self.prepareRemoteFiles(rootDir) - if err != nil { - return err - } - - files := &syncFiles{ - root: &remoteFile{file: rootDir}, - local: localFiles, - remote: remoteFiles, - } - // Create missing directories files, err = self.createMissingRemoteDirs(files) if err != nil { @@ -76,6 +63,37 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { return nil } +func (self *Drive) prepareSyncFiles(localPath string, root *drive.File) (*syncFiles, error) { + localCh := make(chan struct{files []*localFile; err error}) + remoteCh := make(chan struct{files []*remoteFile; err error}) + + go func() { + files, err := prepareLocalFiles(localPath) + localCh <- struct{files []*localFile; err error}{files, err} + }() + + go func() { + files, err := self.prepareRemoteFiles(root) + remoteCh <- struct{files []*remoteFile; err error}{files, err} + }() + + local := <-localCh + if local.err != nil { + return nil, local.err + } + + remote := <-remoteCh + if remote.err != nil { + return nil, remote.err + } + + return &syncFiles{ + root: &remoteFile{file: root}, + local: local.files, + remote: remote.files, + }, nil +} + func (self *Drive) getOrCreateSyncRootDir(args UploadSyncArgs) (*drive.File, bool, error) { // Root dir name name := filepath.Base(args.Path) From 3d98eb0ac55a09a92f4dd1e214f246e988874e9f Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 31 Jan 2016 01:57:51 +0100 Subject: [PATCH 064/195] More verbosity --- drive/upload_sync.go | 54 ++++++++++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 15 deletions(-) diff --git a/drive/upload_sync.go b/drive/upload_sync.go index c7cc8e0e..b0000061 100644 --- a/drive/upload_sync.go +++ b/drive/upload_sync.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "os" + "time" "sort" "path/filepath" "github.com/gyuho/goraph/graph" @@ -25,25 +26,29 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) } - rootDir, created, err := self.getOrCreateSyncRootDir(args) + fmt.Fprintln(args.Out, "Starting sync...") + started := time.Now() + + // Create root directory if it does not exist + rootDir, createdRoot, err := self.getOrCreateSyncRootDir(args) if err != nil { return err } - if created { - fmt.Fprintln(args.Out, "Did not find any existing files, starting from scratch") - } else { - fmt.Fprintln(args.Out, "Found existing root directory, let's see whats changed") + if createdRoot { + fmt.Fprintln(args.Out, "No existing files found on drive, starting from scratch") } - // Collect information about local and remote files + fmt.Fprintln(args.Out, "Collecting local and remote file information...") files, err := self.prepareSyncFiles(args.Path, rootDir) if err != nil { return err } + fmt.Fprintf(args.Out, "Found %d local file(s) and %d remote file(s)\n", len(files.local), len(files.remote)) + // Create missing directories - files, err = self.createMissingRemoteDirs(files) + files, err = self.createMissingRemoteDirs(files, args) if err != nil { return err } @@ -60,6 +65,8 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { return err } + fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started)) + return nil } @@ -141,13 
+148,18 @@ func (self *Drive) getOrCreateSyncRootDir(args UploadSyncArgs) (*drive.File, boo return f, true, nil } -func (self *Drive) createMissingRemoteDirs(files *syncFiles) (*syncFiles, error) { +func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs) (*syncFiles, error) { missingDirs := files.filterMissingRemoteDirs() + missingCount := len(missingDirs) + + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d directories missing on drive\n", missingCount) + } // Sort directories so that the dirs with the shortest path comes first sort.Sort(byPathLength(missingDirs)) - for _, lf := range missingDirs { + for i, lf := range missingDirs { parentPath := parentFilePath(lf.relPath) parent, ok := files.findRemoteByPath(parentPath) if !ok { @@ -161,7 +173,7 @@ func (self *Drive) createMissingRemoteDirs(files *syncFiles) (*syncFiles, error) AppProperties: map[string]string{"syncRootId": files.root.file.Id}, } - fmt.Printf("Creating directory: %s\n", filepath.Join(files.root.file.Name, lf.relPath)) + fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory: %s\n", i + 1, missingCount, filepath.Join(files.root.file.Name, lf.relPath)) f, err := self.service.Files.Create(dstFile).Do() if err != nil { @@ -178,7 +190,14 @@ func (self *Drive) createMissingRemoteDirs(files *syncFiles) (*syncFiles, error) } func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) error { - for _, lf := range files.filterMissingRemoteFiles() { + missingFiles := files.filterMissingRemoteFiles() + missingCount := len(missingFiles) + + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d file(s) missing on drive\n", missingCount) + } + + for i, lf := range missingFiles { parentPath := parentFilePath(lf.relPath) parent, ok := files.findRemoteByPath(parentPath) if !ok { @@ -189,7 +208,7 @@ func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) err newArgs.Path = lf.absPath newArgs.Parent = parent.file.Id - fmt.Printf("%s -> %s\n", lf.absPath, filepath.Join(files.root.file.Name, lf.relPath)) + fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i + 1, missingCount, lf.absPath, filepath.Join(files.root.file.Name, lf.relPath)) err := self.uploadMissingFile(files.root.file.Id, lf, newArgs) if err != nil { return err @@ -200,10 +219,15 @@ func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) err } func (self *Drive) updateChangedFiles(files *syncFiles, args UploadSyncArgs) error { - for _, cf := range files.filterChangedLocalFiles() { - fmt.Println(cf.local.absPath) + changedFiles := files.filterChangedLocalFiles() + changedCount := len(changedFiles) + + if changedCount > 0 { + fmt.Fprintf(args.Out, "\n%d local file(s) has changed\n", changedCount) + } - fmt.Printf("Updating %s -> %s\n", cf.local.absPath, filepath.Join(files.root.file.Name, cf.local.relPath)) + for i, cf := range changedFiles { + fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i + 1, changedCount, cf.local.absPath, filepath.Join(files.root.file.Name, cf.local.relPath)) err := self.updateChangedFile(cf, args) if err != nil { return err From 893e48c864ac601fbde20f1b07331852e26358a7 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 31 Jan 2016 12:56:58 +0100 Subject: [PATCH 065/195] Delete extraneous files --- drive/upload_sync.go | 64 +++++++++++++++++++++++++++++++++++++++++++- gdrive.go | 4 +-- handlers_drive.go | 2 +- 3 files changed, 66 insertions(+), 4 deletions(-) diff --git a/drive/upload_sync.go b/drive/upload_sync.go index b0000061..763bfcf5 100644 
--- a/drive/upload_sync.go +++ b/drive/upload_sync.go @@ -17,7 +17,7 @@ type UploadSyncArgs struct { Progress io.Writer Path string Parent string - DeleteRemote bool + DeleteExtraneous bool ChunkSize int64 } @@ -65,6 +65,13 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { return err } + // Delete extraneous files on drive + if args.DeleteExtraneous { + err = self.deleteExtraneousRemoteFiles(files, args) + if err != nil { + return err + } + } fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started)) return nil @@ -237,6 +244,25 @@ func (self *Drive) updateChangedFiles(files *syncFiles, args UploadSyncArgs) err return nil } +func (self *Drive) deleteExtraneousRemoteFiles(files *syncFiles, args UploadSyncArgs) error { + extraneousFiles := files.filterExtraneousRemoteFiles() + extraneousCount := len(extraneousFiles) + + if extraneousCount > 0 { + fmt.Fprintf(args.Out, "\n%d extraneous file(s) on drive\n", extraneousCount) + } + + for i, rf := range extraneousFiles { + fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, filepath.Join(files.root.file.Name, rf.relPath)) + err := self.deleteRemoteFile(rf, args) + if err != nil { + return err + } + } + + return nil +} + func (self *Drive) uploadMissingFile(rootId string, lf *localFile, args UploadSyncArgs) error { srcFile, err := os.Open(lf.absPath) if err != nil { @@ -287,6 +313,15 @@ func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs) error return nil } +func (self *Drive) deleteRemoteFile(rf *remoteFile, args UploadSyncArgs) error { + err := self.service.Files.Delete(rf.file.Id).Do() + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } + + return nil +} + func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*remoteFile, error) { // Find all files which has rootDir as root query := fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id) @@ -488,11 +523,28 @@ func (self *syncFiles) filterChangedLocalFiles() []*changedFile { return files } +func (self *syncFiles) filterExtraneousRemoteFiles() []*remoteFile { + var files []*remoteFile + + for _, rf := range self.remote { + if !self.existsLocal(rf) { + files = append(files, rf) + } + } + + return files +} + func (self *syncFiles) existsRemote(lf *localFile) bool { _, found := self.findRemoteByPath(lf.relPath) return found } +func (self *syncFiles) existsLocal(rf *remoteFile) bool { + _, found := self.findLocalByPath(rf.relPath) + return found +} + func (self *syncFiles) findRemoteByPath(relPath string) (*remoteFile, bool) { if relPath == "." 
{ return self.root, true @@ -507,6 +559,16 @@ func (self *syncFiles) findRemoteByPath(relPath string) (*remoteFile, bool) { return nil, false } +func (self *syncFiles) findLocalByPath(relPath string) (*localFile, bool) { + for _, lf := range self.local { + if relPath == lf.relPath { + return lf, true + } + } + + return nil, false +} + type byPathLength []*localFile func (self byPathLength) Len() int { diff --git a/gdrive.go b/gdrive.go index 562045bd..b6d09405 100644 --- a/gdrive.go +++ b/gdrive.go @@ -310,8 +310,8 @@ func main() { OmitValue: true, }, cli.BoolFlag{ - Name: "deleteRemote", - Patterns: []string{"--delete-remote"}, + Name: "deleteExtraneous", + Patterns: []string{"--delete-extraneous"}, Description: "Delete extraneous files from drive", OmitValue: true, }, diff --git a/handlers_drive.go b/handlers_drive.go index 75da5938..e6939f2c 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -106,7 +106,7 @@ func uploadSyncHandler(ctx cli.Context) { Progress: progressWriter(args.Bool("noProgress")), Path: args.String("path"), Parent: args.String("parent"), - DeleteRemote: args.Bool("deleteRemote"), + DeleteExtraneous: args.Bool("deleteExtraneous"), ChunkSize: args.Int64("chunksize"), }) checkErr(err) From 90a9a8bc58568509b5314328b721b1e7a9400a51 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 31 Jan 2016 15:37:58 +0100 Subject: [PATCH 066/195] Require root dir id for syncing --- drive/upload_sync.go | 85 +++++++++++++++++++++----------------------- gdrive.go | 7 +--- handlers_drive.go | 2 +- 3 files changed, 43 insertions(+), 51 deletions(-) diff --git a/drive/upload_sync.go b/drive/upload_sync.go index 763bfcf5..3e157eda 100644 --- a/drive/upload_sync.go +++ b/drive/upload_sync.go @@ -16,7 +16,7 @@ type UploadSyncArgs struct { Out io.Writer Progress io.Writer Path string - Parent string + RootId string DeleteExtraneous bool ChunkSize int64 } @@ -30,15 +30,11 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { started := time.Now() // Create root directory if it does not exist - rootDir, createdRoot, err := self.getOrCreateSyncRootDir(args) + rootDir, err := self.prepareSyncRoot(args) if err != nil { return err } - if createdRoot { - fmt.Fprintln(args.Out, "No existing files found on drive, starting from scratch") - } - fmt.Fprintln(args.Out, "Collecting local and remote file information...") files, err := self.prepareSyncFiles(args.Path, rootDir) if err != nil { @@ -108,51 +104,46 @@ func (self *Drive) prepareSyncFiles(localPath string, root *drive.File) (*syncFi }, nil } -func (self *Drive) getOrCreateSyncRootDir(args UploadSyncArgs) (*drive.File, bool, error) { - // Root dir name - name := filepath.Base(args.Path) +func (self *Drive) prepareSyncRoot(args UploadSyncArgs) (*drive.File, error) { + fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"} + f, err := self.service.Files.Get(args.RootId).Fields(fields...).Do() + if err != nil { + return nil, fmt.Errorf("Failed to find root dir: %s", err) + } - // Build root dir query - query := fmt.Sprintf("name = '%s' and appProperties has {key='isSyncRoot' and value='true'}", name) - if args.Parent != "" { - query += fmt.Sprintf(" and '%s' in parents", args.Parent) + // Ensure file is a directory + if !isDir(f) { + return nil, fmt.Errorf("Provided root id is not a directory") } - // Find root dir - fileList, err := self.service.Files.List().Q(query).Fields("files(id,name,mimeType)").Do() - if err != nil { - return nil, false, fmt.Errorf("Failed listing files: %s", err) + // Return directory if 
syncRoot property is already set + if _, ok := f.AppProperties["isSyncRoot"]; ok { + return f, nil + } + + // This is the first time this directory has been used for sync + // Check if the directory is empty + isEmpty, err := self.dirIsEmpty(f.Id) + if err != nil { + return nil, fmt.Errorf("Failed to check if root dir is empty: %s", err) + } + + // Ensure that the directory is empty + if !isEmpty { + return nil, fmt.Errorf("Root directory is not empty, the initial sync requires an empty directory") + } + + // Update directory with syncRoot property + dstFile := &drive.File{ + AppProperties: map[string]string{"isSyncRoot": "true"}, + } - // Add parent if provided - if args.Parent != "" { - dstFile.Parents = []string{args.Parent} - } - - // Create directory - f, err := self.service.Files.Create(dstFile).Do() + f, err = self.service.Files.Update(f.Id, dstFile).Fields(fields...).Do() if err != nil { - return nil, false, fmt.Errorf("Failed to create directory: %s", err) + return nil, fmt.Errorf("Failed to update root directory: %s", err) } - return f, true, nil + return f, nil } func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs) (*syncFiles, error) { @@ -177,7 +168,7 @@ func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs Name: lf.info.Name(), MimeType: DirectoryMimeType, Parents: []string{parent.file.Id}, - AppProperties: map[string]string{"syncRootId": files.root.file.Id}, + AppProperties: map[string]string{"syncRootId": args.RootId}, } fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory: %s\n", i + 1, missingCount, filepath.Join(files.root.file.Name, lf.relPath)) @@ -211,12 +202,8 @@ func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) err return fmt.Errorf("Could not find remote directory with path '%s', aborting...", parentPath) } - newArgs := args - newArgs.Path = lf.absPath - newArgs.Parent = parent.file.Id - fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i + 1, missingCount, lf.absPath, filepath.Join(files.root.file.Name, lf.relPath)) - err := self.uploadMissingFile(files.root.file.Id, lf, newArgs) + err := self.uploadMissingFile(parent.file.Id, lf, args) if err != nil { return err } @@ -263,7 +250,7 @@ func (self *Drive) uploadMissingFile(rootId string, lf *localFile, args UploadSy srcFile, err := os.Open(lf.absPath) if err != nil { return fmt.Errorf("Failed to open file: %s", err) @@ -272,8 +259,8 @@ func (self *Drive) uploadMissingFile(rootId string, lf *localFile, args UploadSy // Instantiate drive file dstFile := &drive.File{ Name: lf.info.Name(), - Parents: []string{args.Parent}, - AppProperties: map[string]string{"syncRootId": rootId}, + Parents: []string{parentId}, + AppProperties: map[string]string{"syncRootId": args.RootId}, } // Chunk size option @@ -354,6 +341,16 @@ func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*remoteFile, error return remoteFiles, nil } +func (self *Drive) dirIsEmpty(id string) (bool, error) { + query := fmt.Sprintf("'%s' in parents", 
id) + fileList, err := self.service.Files.List().Q(query).Do() + if err != nil { + return false, fmt.Errorf("Empty dir check failed: %s", err) + } + + return len(fileList.Files) == 0, nil +} + func checkFiles(files []*drive.File) error { uniq := map[string]string{} diff --git a/gdrive.go b/gdrive.go index b6d09405..cbe647fe 100644 --- a/gdrive.go +++ b/gdrive.go @@ -292,17 +292,12 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] upload sync [options] ", + Pattern: "[global] upload sync [options] ", Description: "Sync local directory to drive", Callback: uploadSyncHandler, Flags: cli.Flags{ "global": globalFlags, "options": []cli.Flag{ - cli.StringFlag{ - Name: "parent", - Patterns: []string{"-p", "--parent"}, - Description: "Parent id", - }, cli.BoolFlag{ Name: "noProgress", Patterns: []string{"--no-progress"}, diff --git a/handlers_drive.go b/handlers_drive.go index e6939f2c..47e5f042 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -105,7 +105,7 @@ func uploadSyncHandler(ctx cli.Context) { Out: os.Stdout, Progress: progressWriter(args.Bool("noProgress")), Path: args.String("path"), - Parent: args.String("parent"), + RootId: args.String("id"), DeleteExtraneous: args.Bool("deleteExtraneous"), ChunkSize: args.Int64("chunksize"), }) From 2ff8d861923207314ed492637ec97a369b6768da Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 2 Feb 2016 22:32:03 +0100 Subject: [PATCH 067/195] Initial download sync implementation --- drive/download_sync.go | 216 +++++++++++++++++++++++++++++++++++++++++ drive/upload_sync.go | 77 +++++++++++++++ gdrive.go | 22 +++++ handlers_drive.go | 12 +++ 4 files changed, 327 insertions(+) create mode 100644 drive/download_sync.go diff --git a/drive/download_sync.go b/drive/download_sync.go new file mode 100644 index 00000000..c4bd9775 --- /dev/null +++ b/drive/download_sync.go @@ -0,0 +1,216 @@ +package drive + +import ( + "fmt" + "io" + "os" + "sort" + "time" + "path/filepath" + "google.golang.org/api/googleapi" + "google.golang.org/api/drive/v3" +) + +type DownloadSyncArgs struct { + Out io.Writer + Progress io.Writer + RootId string + Path string + DeleteExtraneous bool +} + +func (self *Drive) DownloadSync(args DownloadSyncArgs) error { + fmt.Fprintln(args.Out, "Starting sync...") + started := time.Now() + + // Get remote root dir + rootDir, err := self.getSyncRoot(args.RootId) + if err != nil { + return err + } + + fmt.Fprintln(args.Out, "Collecting local and remote file information...") + files, err := self.prepareSyncFiles(args.Path, rootDir) + if err != nil { + return err + } + + fmt.Fprintf(args.Out, "Found %d local file(s) and %d remote file(s)\n", len(files.local), len(files.remote)) + + // Create missing directories + files, err = self.createMissingLocalDirs(files, args) + if err != nil { + return err + } + + // Download missing files + err = self.downloadMissingFiles(files, args) + if err != nil { + return err + } + + // Download files that have changed + err = self.downloadChangedFiles(files, args) + if err != nil { + return err + } + + // Delete extraneous local files + if args.DeleteExtraneous { + err = self.deleteExtraneousLocalFiles(files, args) + if err != nil { + return err + } + } + fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started)) + + return nil +} + +func (self *Drive) getSyncRoot(rootId string) (*drive.File, error) { + fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"} + f, err := self.service.Files.Get(rootId).Fields(fields...).Do() + if err != nil { + return nil, fmt.Errorf("Failed to 
find root dir: %s", err) + } + + // Ensure file is a directory + if !isDir(f) { + return nil, fmt.Errorf("Provided root id is not a directory") + } + + // Ensure directory is a proper syncRoot + if _, ok := f.AppProperties["isSyncRoot"]; !ok { + return nil, fmt.Errorf("Provided id is not a sync root directory") + } + + return f, nil +} + +func (self *Drive) createMissingLocalDirs(files *syncFiles, args DownloadSyncArgs) (*syncFiles, error) { + missingDirs := files.filterMissingLocalDirs() + missingCount := len(missingDirs) + + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d local directories are missing\n", missingCount) + } + + // Sort directories so that the dirs with the shortest path comes first + sort.Sort(byRemotePathLength(missingDirs)) + + for i, rf := range missingDirs { + path, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) + if err != nil { + return nil, fmt.Errorf("Failed to determine local absolute path: %s", err) + } + fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory: %s\n", i + 1, missingCount, path) + mkdir(path) + } + + return files, nil +} + +func (self *Drive) downloadMissingFiles(files *syncFiles, args DownloadSyncArgs) error { + missingFiles := files.filterMissingLocalFiles() + missingCount := len(missingFiles) + + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d local file(s) are missing\n", missingCount) + } + + for i, rf := range missingFiles { + remotePath := filepath.Join(files.root.file.Name, rf.relPath) + localPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) + if err != nil { + return fmt.Errorf("Failed to determine local absolute path: %s", err) + } + fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, missingCount, remotePath, localPath) + err = self.downloadRemoteFile(rf.file.Id, localPath, args) + if err != nil { + return err + } + } + + return nil +} + +func (self *Drive) downloadChangedFiles(files *syncFiles, args DownloadSyncArgs) error { + changedFiles := files.filterChangedRemoteFiles() + changedCount := len(changedFiles) + + if changedCount > 0 { + fmt.Fprintf(args.Out, "\n%d remote file(s) has changed\n", changedCount) + } + + for i, cf := range changedFiles { + remotePath := filepath.Join(files.root.file.Name, cf.remote.relPath) + localPath, err := filepath.Abs(filepath.Join(args.Path, cf.remote.relPath)) + if err != nil { + return fmt.Errorf("Failed to determine local absolute path: %s", err) + } + fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, changedCount, remotePath, localPath) + err = self.downloadRemoteFile(cf.remote.file.Id, localPath, args) + if err != nil { + return err + } + } + + return nil +} + +func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs) error { + res, err := self.service.Files.Get(id).Download() + if err != nil { + return fmt.Errorf("Failed to download file: %s", err) + } + + // Close body on function exit + defer res.Body.Close() + + // Wrap response body in progress reader + srcReader := getProgressReader(res.Body, args.Progress, res.ContentLength) + + // Ensure any parent directories exists + if err = mkdir(fpath); err != nil { + return err + } + + // Create new file + outFile, err := os.Create(fpath) + if err != nil { + return fmt.Errorf("Unable to create local file: %s", err) + } + + // Close file on function exit + defer outFile.Close() + + // Save file to disk + _, err = io.Copy(outFile, srcReader) + if err != nil { + return fmt.Errorf("Download was interrupted: %s", err) + } + + return nil +} + +func (self *Drive) 
deleteExtraneousLocalFiles(files *syncFiles, args DownloadSyncArgs) error { + extraneousFiles := files.filterExtraneousLocalFiles() + extraneousCount := len(extraneousFiles) + + if extraneousCount > 0 { + fmt.Fprintf(args.Out, "\n%d local file(s) are extraneous\n", extraneousCount) + } + + // Sort files so that the files with the longest path comes first + sort.Sort(sort.Reverse(byPathLength(extraneousFiles))) + + for i, lf := range extraneousFiles { + fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, lf.absPath) + err := os.Remove(lf.absPath) + if err != nil { + return fmt.Errorf("Failed to delete local file: %s", err) + } + } + + return nil +} diff --git a/drive/upload_sync.go b/drive/upload_sync.go index 3e157eda..081f739d 100644 --- a/drive/upload_sync.go +++ b/drive/upload_sync.go @@ -481,6 +481,18 @@ func (self *syncFiles) filterMissingRemoteDirs() []*localFile { return files } +func (self *syncFiles) filterMissingLocalDirs() []*remoteFile { + var files []*remoteFile + + for _, rf := range self.remote { + if isDir(rf.file) && !self.existsLocal(rf) { + files = append(files, rf) + } + } + + return files +} + func (self *syncFiles) filterMissingRemoteFiles() []*localFile { var files []*localFile @@ -493,6 +505,18 @@ func (self *syncFiles) filterMissingRemoteFiles() []*localFile { return files } +func (self *syncFiles) filterMissingLocalFiles() []*remoteFile { + var files []*remoteFile + + for _, rf := range self.remote { + if !isDir(rf.file) && !self.existsLocal(rf) { + files = append(files, rf) + } + } + + return files +} + func (self *syncFiles) filterChangedLocalFiles() []*changedFile { var files []*changedFile @@ -520,6 +544,33 @@ func (self *syncFiles) filterChangedLocalFiles() []*changedFile { return files } +func (self *syncFiles) filterChangedRemoteFiles() []*changedFile { + var files []*changedFile + + for _, rf := range self.remote { + // Skip directories + if isDir(rf.file) { + continue + } + + // Skip local files that don't exist + lf, found := self.findLocalByPath(rf.relPath) + if !found { + continue + } + + // Add files where remote md5 sum does not match local + if rf.file.Md5Checksum != md5sum(lf.absPath) { + files = append(files, &changedFile{ + local: lf, + remote: rf, + }) + } + } + + return files +} + func (self *syncFiles) filterExtraneousRemoteFiles() []*remoteFile { var files []*remoteFile @@ -532,6 +583,18 @@ func (self *syncFiles) filterExtraneousRemoteFiles() []*remoteFile { return files } +func (self *syncFiles) filterExtraneousLocalFiles() []*localFile { + var files []*localFile + + for _, lf := range self.local { + if !self.existsRemote(lf) { + files = append(files, lf) + } + } + + return files +} + func (self *syncFiles) existsRemote(lf *localFile) bool { _, found := self.findRemoteByPath(lf.relPath) return found @@ -579,3 +642,17 @@ func (self byPathLength) Swap(i, j int) { func (self byPathLength) Less(i, j int) bool { return pathLength(self[i].relPath) < pathLength(self[j].relPath) } + +type byRemotePathLength []*remoteFile + +func (self byRemotePathLength) Len() int { + return len(self) +} + +func (self byRemotePathLength) Swap(i, j int) { + self[i], self[j] = self[j], self[i] +} + +func (self byRemotePathLength) Less(i, j int) bool { + return pathLength(self[i].relPath) < pathLength(self[j].relPath) +} diff --git a/gdrive.go b/gdrive.go index cbe647fe..462622be 100644 --- a/gdrive.go +++ b/gdrive.go @@ -182,6 +182,28 @@ func main() { }, }, }, + &cli.Handler{ + Pattern: "[global] download sync [options] ", + Description: "Sync 
drive directory to local directory", + Callback: downloadSyncHandler, + Flags: cli.Flags{ + "global": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "deleteExtraneous", + Patterns: []string{"--delete-extraneous"}, + Description: "Delete extraneous local files", + OmitValue: true, + }, + }, + }, + }, &cli.Handler{ Pattern: "[global] download revision [options] ", Description: "Download revision", diff --git a/handlers_drive.go b/handlers_drive.go index 47e5f042..2b6d61e2 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -56,6 +56,18 @@ func downloadHandler(ctx cli.Context) { checkErr(err) } +func downloadSyncHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).DownloadSync(drive.DownloadSyncArgs{ + Out: os.Stdout, + Progress: progressWriter(args.Bool("noProgress")), + Path: args.String("path"), + RootId: args.String("id"), + DeleteExtraneous: args.Bool("deleteExtraneous"), + }) + checkErr(err) +} + func downloadRevisionHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).DownloadRevision(drive.DownloadRevisionArgs{ From 3e09b6dad4e90c4a2bcc97b393f3096f7abf2d8d Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 2 Feb 2016 22:34:42 +0100 Subject: [PATCH 068/195] s/byPathLength/byLocalPathLength/ --- drive/download_sync.go | 2 +- drive/upload_sync.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drive/download_sync.go b/drive/download_sync.go index c4bd9775..5fb88abf 100644 --- a/drive/download_sync.go +++ b/drive/download_sync.go @@ -202,7 +202,7 @@ func (self *Drive) deleteExtraneousLocalFiles(files *syncFiles, args DownloadSyn } // Sort files so that the files with the longest path comes first - sort.Sort(sort.Reverse(byPathLength(extraneousFiles))) + sort.Sort(sort.Reverse(byLocalPathLength(extraneousFiles))) for i, lf := range extraneousFiles { fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, lf.absPath) diff --git a/drive/upload_sync.go b/drive/upload_sync.go index 081f739d..ed1e7a7c 100644 --- a/drive/upload_sync.go +++ b/drive/upload_sync.go @@ -155,7 +155,7 @@ func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs } // Sort directories so that the dirs with the shortest path comes first - sort.Sort(byPathLength(missingDirs)) + sort.Sort(byLocalPathLength(missingDirs)) for i, lf := range missingDirs { parentPath := parentFilePath(lf.relPath) @@ -629,17 +629,17 @@ func (self *syncFiles) findLocalByPath(relPath string) (*localFile, bool) { return nil, false } -type byPathLength []*localFile +type byLocalPathLength []*localFile -func (self byPathLength) Len() int { +func (self byLocalPathLength) Len() int { return len(self) } -func (self byPathLength) Swap(i, j int) { +func (self byLocalPathLength) Swap(i, j int) { self[i], self[j] = self[j], self[i] } -func (self byPathLength) Less(i, j int) bool { +func (self byLocalPathLength) Less(i, j int) bool { return pathLength(self[i].relPath) < pathLength(self[j].relPath) } From 2a392831aa7112a2faa700a55ad057557a39410b Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 2 Feb 2016 22:38:08 +0100 Subject: [PATCH 069/195] Sort files by longest path --- drive/upload_sync.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drive/upload_sync.go b/drive/upload_sync.go index ed1e7a7c..748e61db 100644 --- a/drive/upload_sync.go +++ b/drive/upload_sync.go @@ -239,6 
+239,9 @@ func (self *Drive) deleteExtraneousRemoteFiles(files *syncFiles, args UploadSync fmt.Fprintf(args.Out, "\n%d extraneous file(s) on drive\n", extraneousCount) } + // Sort files so that the files with the longest path comes first + sort.Sort(sort.Reverse(byRemotePathLength(extraneousFiles))) + for i, rf := range extraneousFiles { fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, filepath.Join(files.root.file.Name, rf.relPath)) err := self.deleteRemoteFile(rf, args) From 5ad8ce4c919ec3a9b8a10848ceee693260b33be2 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 2 Feb 2016 22:39:21 +0100 Subject: [PATCH 070/195] Change wording --- drive/download_sync.go | 8 ++++---- drive/upload_sync.go | 10 +++++----- gdrive.go | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drive/download_sync.go b/drive/download_sync.go index 5fb88abf..37c78cb6 100644 --- a/drive/download_sync.go +++ b/drive/download_sync.go @@ -35,7 +35,7 @@ func (self *Drive) DownloadSync(args DownloadSyncArgs) error { return err } - fmt.Fprintf(args.Out, "Found %d local file(s) and %d remote file(s)\n", len(files.local), len(files.remote)) + fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) // Create missing directories files, err = self.createMissingLocalDirs(files, args) @@ -115,7 +115,7 @@ func (self *Drive) downloadMissingFiles(files *syncFiles, args DownloadSyncArgs) missingCount := len(missingFiles) if missingCount > 0 { - fmt.Fprintf(args.Out, "\n%d local file(s) are missing\n", missingCount) + fmt.Fprintf(args.Out, "\n%d local files are missing\n", missingCount) } for i, rf := range missingFiles { @@ -139,7 +139,7 @@ func (self *Drive) downloadChangedFiles(files *syncFiles, args DownloadSyncArgs) changedCount := len(changedFiles) if changedCount > 0 { - fmt.Fprintf(args.Out, "\n%d remote file(s) has changed\n", changedCount) + fmt.Fprintf(args.Out, "\n%d remote files have changed\n", changedCount) } for i, cf := range changedFiles { @@ -198,7 +198,7 @@ func (self *Drive) deleteExtraneousLocalFiles(files *syncFiles, args DownloadSyn extraneousCount := len(extraneousFiles) if extraneousCount > 0 { - fmt.Fprintf(args.Out, "\n%d local file(s) are extraneous\n", extraneousCount) + fmt.Fprintf(args.Out, "\n%d local files are extraneous\n", extraneousCount) } // Sort files so that the files with the longest path comes first diff --git a/drive/upload_sync.go b/drive/upload_sync.go index 748e61db..0af83f09 100644 --- a/drive/upload_sync.go +++ b/drive/upload_sync.go @@ -41,7 +41,7 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { return err } - fmt.Fprintf(args.Out, "Found %d local file(s) and %d remote file(s)\n", len(files.local), len(files.remote)) + fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) // Create missing directories files, err = self.createMissingRemoteDirs(files, args) @@ -151,7 +151,7 @@ func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs missingCount := len(missingDirs) if missingCount > 0 { - fmt.Fprintf(args.Out, "\n%d directories missing on drive\n", missingCount) + fmt.Fprintf(args.Out, "\n%d remote directories are missing\n", missingCount) } // Sort directories so that the dirs with the shortest path comes first @@ -192,7 +192,7 @@ func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) err missingCount := len(missingFiles) if missingCount > 0 { - fmt.Fprintf(args.Out, "\n%d file(s) 
missing on drive\n", missingCount) + fmt.Fprintf(args.Out, "\n%d remote files are missing\n", missingCount) } for i, lf := range missingFiles { @@ -217,7 +217,7 @@ func (self *Drive) updateChangedFiles(files *syncFiles, args UploadSyncArgs) err changedCount := len(changedFiles) if changedCount > 0 { - fmt.Fprintf(args.Out, "\n%d local file(s) has changed\n", changedCount) + fmt.Fprintf(args.Out, "\n%d local files has changed\n", changedCount) } for i, cf := range changedFiles { @@ -236,7 +236,7 @@ func (self *Drive) deleteExtraneousRemoteFiles(files *syncFiles, args UploadSync extraneousCount := len(extraneousFiles) if extraneousCount > 0 { - fmt.Fprintf(args.Out, "\n%d extraneous file(s) on drive\n", extraneousCount) + fmt.Fprintf(args.Out, "\n%d remote files are extraneous\n", extraneousCount) } // Sort files so that the files with the longest path comes first diff --git a/gdrive.go b/gdrive.go index 462622be..19cbdf0e 100644 --- a/gdrive.go +++ b/gdrive.go @@ -329,7 +329,7 @@ func main() { cli.BoolFlag{ Name: "deleteExtraneous", Patterns: []string{"--delete-extraneous"}, - Description: "Delete extraneous files from drive", + Description: "Delete extraneous remote files", OmitValue: true, }, cli.IntFlag{ From bc29d65240130c5908a843c97672b79c2495678c Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 2 Feb 2016 22:55:29 +0100 Subject: [PATCH 071/195] Move share sync stuff to own file --- drive/sync.go | 378 +++++++++++++++++++++++++++++++++++++++++++ drive/upload_sync.go | 370 ------------------------------------------ 2 files changed, 378 insertions(+), 370 deletions(-) create mode 100644 drive/sync.go diff --git a/drive/sync.go b/drive/sync.go new file mode 100644 index 00000000..7054ab93 --- /dev/null +++ b/drive/sync.go @@ -0,0 +1,378 @@ +package drive + +import ( + "fmt" + "os" + "path/filepath" + "github.com/gyuho/goraph/graph" + "google.golang.org/api/drive/v3" +) + +func (self *Drive) prepareSyncFiles(localPath string, root *drive.File) (*syncFiles, error) { + localCh := make(chan struct{files []*localFile; err error}) + remoteCh := make(chan struct{files []*remoteFile; err error}) + + go func() { + files, err := prepareLocalFiles(localPath) + localCh <- struct{files []*localFile; err error}{files, err} + }() + + go func() { + files, err := self.prepareRemoteFiles(root) + remoteCh <- struct{files []*remoteFile; err error}{files, err} + }() + + local := <-localCh + if local.err != nil { + return nil, local.err + } + + remote := <-remoteCh + if remote.err != nil { + return nil, remote.err + } + + return &syncFiles{ + root: &remoteFile{file: root}, + local: local.files, + remote: remote.files, + }, nil +} + +func prepareLocalFiles(root string) ([]*localFile, error) { + var files []*localFile + + // Get absolute root path + absRootPath, err := filepath.Abs(root) + if err != nil { + return nil, err + } + + err = filepath.Walk(absRootPath, func(absPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip root directory + if absPath == absRootPath { + return nil + } + + relPath, err := filepath.Rel(absRootPath, absPath) + if err != nil { + return err + } + + files = append(files, &localFile{ + absPath: absPath, + relPath: relPath, + info: info, + }) + + return nil + }) + + if err != nil { + return nil, fmt.Errorf("Failed to prepare local files: %s", err) + } + + return files, err +} + +func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*remoteFile, error) { + // Find all files which has rootDir as root + query := 
fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id) + fileList, err := self.service.Files.List().Q(query).Fields("files(id,name,parents,md5Checksum,mimeType)").Do() + if err != nil { + return nil, fmt.Errorf("Failed listing files: %s", err) + } + + if err := checkFiles(fileList.Files); err != nil { + return nil, err + } + + relPaths, err := prepareRemoteRelPaths(rootDir.Id, fileList.Files) + if err != nil { + return nil, err + } + + var remoteFiles []*remoteFile + for _, f := range fileList.Files { + relPath, ok := relPaths[f.Id] + if !ok { + return nil, fmt.Errorf("File %s does not have a valid parent, aborting...", f.Id) + } + remoteFiles = append(remoteFiles, &remoteFile{ + relPath: relPath, + file: f, + }) + } + + return remoteFiles, nil +} + +func prepareRemoteRelPaths(rootId string, files []*drive.File) (map[string]string, error) { + names := map[string]string{} + idGraph := graph.NewDefaultGraph() + + for _, f := range files { + // Store directory name for quick lookup + names[f.Id] = f.Name + + // Store path between parent and child folder + idGraph.AddVertex(f.Id) + idGraph.AddVertex(f.Parents[0]) + idGraph.AddEdge(f.Parents[0], f.Id, 0) + } + + paths := map[string]string{} + + for _, f := range files { + // Find path from root to directory + pathIds, _, err := graph.Dijkstra(idGraph, rootId, f.Id) + if err != nil { + return nil, err + } + + // Convert path ids to path names + var pathNames []string + for _, id := range pathIds { + pathNames = append(pathNames, names[id]) + } + + // Store relative file path from root to directory + paths[f.Id] = filepath.Join(pathNames...) + } + + return paths, nil +} + +func checkFiles(files []*drive.File) error { + uniq := map[string]string{} + + for _, f := range files { + // Ensure all files have exactly one parent + if len(f.Parents) != 1 { + return fmt.Errorf("File %s does not have exacly one parent, aborting...", f.Id) + } + + // Ensure that there are no duplicate files + uniqKey := f.Name + f.Parents[0] + if dupeId, isDupe := uniq[uniqKey]; isDupe { + return fmt.Errorf("Found name collision between %s and %s, aborting", f.Id, dupeId) + } + uniq[uniqKey] = f.Id + } + + return nil +} + +type localFile struct { + absPath string + relPath string + info os.FileInfo +} + +type remoteFile struct { + relPath string + file *drive.File +} + +type changedFile struct { + local *localFile + remote *remoteFile +} + +type syncFiles struct { + root *remoteFile + local []*localFile + remote []*remoteFile +} + +func (self *syncFiles) filterMissingRemoteDirs() []*localFile { + var files []*localFile + + for _, f := range self.local { + if f.info.IsDir() && !self.existsRemote(f) { + files = append(files, f) + } + } + + return files +} + +func (self *syncFiles) filterMissingLocalDirs() []*remoteFile { + var files []*remoteFile + + for _, rf := range self.remote { + if isDir(rf.file) && !self.existsLocal(rf) { + files = append(files, rf) + } + } + + return files +} + +func (self *syncFiles) filterMissingRemoteFiles() []*localFile { + var files []*localFile + + for _, f := range self.local { + if !f.info.IsDir() && !self.existsRemote(f) { + files = append(files, f) + } + } + + return files +} + +func (self *syncFiles) filterMissingLocalFiles() []*remoteFile { + var files []*remoteFile + + for _, rf := range self.remote { + if !isDir(rf.file) && !self.existsLocal(rf) { + files = append(files, rf) + } + } + + return files +} + +func (self *syncFiles) filterChangedLocalFiles() []*changedFile { + var files []*changedFile + + for _, lf := 
range self.local { + // Skip directories + if lf.info.IsDir() { + continue + } + + // Skip files that don't exist on drive + rf, found := self.findRemoteByPath(lf.relPath) + if !found { + continue + } + + // Add files where remote md5 sum does not match local + if rf.file.Md5Checksum != md5sum(lf.absPath) { + files = append(files, &changedFile{ + local: lf, + remote: rf, + }) + } + } + + return files +} + +func (self *syncFiles) filterChangedRemoteFiles() []*changedFile { + var files []*changedFile + + for _, rf := range self.remote { + // Skip directories + if isDir(rf.file) { + continue + } + + // Skip local files that don't exist + lf, found := self.findLocalByPath(rf.relPath) + if !found { + continue + } + + // Add files where remote md5 sum does not match local + if rf.file.Md5Checksum != md5sum(lf.absPath) { + files = append(files, &changedFile{ + local: lf, + remote: rf, + }) + } + } + + return files +} + +func (self *syncFiles) filterExtraneousRemoteFiles() []*remoteFile { + var files []*remoteFile + + for _, rf := range self.remote { + if !self.existsLocal(rf) { + files = append(files, rf) + } + } + + return files +} + +func (self *syncFiles) filterExtraneousLocalFiles() []*localFile { + var files []*localFile + + for _, lf := range self.local { + if !self.existsRemote(lf) { + files = append(files, lf) + } + } + + return files +} + +func (self *syncFiles) existsRemote(lf *localFile) bool { + _, found := self.findRemoteByPath(lf.relPath) + return found +} + +func (self *syncFiles) existsLocal(rf *remoteFile) bool { + _, found := self.findLocalByPath(rf.relPath) + return found +} + +func (self *syncFiles) findRemoteByPath(relPath string) (*remoteFile, bool) { + if relPath == "." { + return self.root, true + } + + for _, rf := range self.remote { + if relPath == rf.relPath { + return rf, true + } + } + + return nil, false +} + +func (self *syncFiles) findLocalByPath(relPath string) (*localFile, bool) { + for _, lf := range self.local { + if relPath == lf.relPath { + return lf, true + } + } + + return nil, false +} + +type byLocalPathLength []*localFile + +func (self byLocalPathLength) Len() int { + return len(self) +} + +func (self byLocalPathLength) Swap(i, j int) { + self[i], self[j] = self[j], self[i] +} + +func (self byLocalPathLength) Less(i, j int) bool { + return pathLength(self[i].relPath) < pathLength(self[j].relPath) +} + +type byRemotePathLength []*remoteFile + +func (self byRemotePathLength) Len() int { + return len(self) +} + +func (self byRemotePathLength) Swap(i, j int) { + self[i], self[j] = self[j], self[i] +} + +func (self byRemotePathLength) Less(i, j int) bool { + return pathLength(self[i].relPath) < pathLength(self[j].relPath) +} diff --git a/drive/upload_sync.go b/drive/upload_sync.go index 0af83f09..a2a06404 100644 --- a/drive/upload_sync.go +++ b/drive/upload_sync.go @@ -7,7 +7,6 @@ import ( "time" "sort" "path/filepath" - "github.com/gyuho/goraph/graph" "google.golang.org/api/googleapi" "google.golang.org/api/drive/v3" ) @@ -73,37 +72,6 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { return nil } -func (self *Drive) prepareSyncFiles(localPath string, root *drive.File) (*syncFiles, error) { - localCh := make(chan struct{files []*localFile; err error}) - remoteCh := make(chan struct{files []*remoteFile; err error}) - - go func() { - files, err := prepareLocalFiles(localPath) - localCh <- struct{files []*localFile; err error}{files, err} - }() - - go func() { - files, err := self.prepareRemoteFiles(root) - remoteCh <- struct{files []*remoteFile; 
err error}{files, err} - }() - - local := <-localCh - if local.err != nil { - return nil, local.err - } - - remote := <-remoteCh - if remote.err != nil { - return nil, remote.err - } - - return &syncFiles{ - root: &remoteFile{file: root}, - local: local.files, - remote: remote.files, - }, nil -} - func (self *Drive) prepareSyncRoot(args UploadSyncArgs) (*drive.File, error) { fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"} f, err := self.service.Files.Get(args.RootId).Fields(fields...).Do() @@ -312,38 +280,6 @@ func (self *Drive) deleteRemoteFile(rf *remoteFile, args UploadSyncArgs) error { return nil } -func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*remoteFile, error) { - // Find all files which has rootDir as root - query := fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id) - fileList, err := self.service.Files.List().Q(query).Fields("files(id,name,parents,md5Checksum,mimeType)").Do() - if err != nil { - return nil, fmt.Errorf("Failed listing files: %s", err) - } - - if err := checkFiles(fileList.Files); err != nil { - return nil, err - } - - relPaths, err := prepareRemoteRelPaths(rootDir.Id, fileList.Files) - if err != nil { - return nil, err - } - - var remoteFiles []*remoteFile - for _, f := range fileList.Files { - relPath, ok := relPaths[f.Id] - if !ok { - return nil, fmt.Errorf("File %s does not have a valid parent, aborting...", f.Id) - } - remoteFiles = append(remoteFiles, &remoteFile{ - relPath: relPath, - file: f, - }) - } - - return remoteFiles, nil -} - func (self *Drive) dirIsEmpty(id string) (bool, error) { query := fmt.Sprintf("'%s' in parents", id) fileList, err := self.service.Files.List().Q(query).Do() @@ -353,309 +289,3 @@ func (self *Drive) dirIsEmpty(id string) (bool, error) { return len(fileList.Files) == 0, nil } - -func checkFiles(files []*drive.File) error { - uniq := map[string]string{} - - for _, f := range files { - // Ensure all files have exactly one parent - if len(f.Parents) != 1 { - return fmt.Errorf("File %s does not have exacly one parent, aborting...", f.Id) - } - - // Ensure that there are no duplicate files - uniqKey := f.Name + f.Parents[0] - if dupeId, isDupe := uniq[uniqKey]; isDupe { - return fmt.Errorf("Found name collision between %s and %s, aborting", f.Id, dupeId) - } - uniq[uniqKey] = f.Id - } - - return nil -} - -func prepareRemoteRelPaths(rootId string, files []*drive.File) (map[string]string, error) { - names := map[string]string{} - idGraph := graph.NewDefaultGraph() - - for _, f := range files { - // Store directory name for quick lookup - names[f.Id] = f.Name - - // Store path between parent and child folder - idGraph.AddVertex(f.Id) - idGraph.AddVertex(f.Parents[0]) - idGraph.AddEdge(f.Parents[0], f.Id, 0) - } - - paths := map[string]string{} - - for _, f := range files { - // Find path from root to directory - pathIds, _, err := graph.Dijkstra(idGraph, rootId, f.Id) - if err != nil { - return nil, err - } - - // Convert path ids to path names - var pathNames []string - for _, id := range pathIds { - pathNames = append(pathNames, names[id]) - } - - // Store relative file path from root to directory - paths[f.Id] = filepath.Join(pathNames...) 
- } - - return paths, nil -} - -type localFile struct { - absPath string - relPath string - info os.FileInfo -} - -type remoteFile struct { - relPath string - file *drive.File -} - -type changedFile struct { - local *localFile - remote *remoteFile -} - -func prepareLocalFiles(root string) ([]*localFile, error) { - var files []*localFile - - // Get absolute root path - absRootPath, err := filepath.Abs(root) - if err != nil { - return nil, err - } - - err = filepath.Walk(absRootPath, func(absPath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Skip root directory - if absPath == absRootPath { - return nil - } - - relPath, err := filepath.Rel(absRootPath, absPath) - if err != nil { - return err - } - - files = append(files, &localFile{ - absPath: absPath, - relPath: relPath, - info: info, - }) - - return nil - }) - - if err != nil { - return nil, fmt.Errorf("Failed to prepare local files: %s", err) - } - - return files, err -} - -type syncFiles struct { - root *remoteFile - local []*localFile - remote []*remoteFile -} - -func (self *syncFiles) filterMissingRemoteDirs() []*localFile { - var files []*localFile - - for _, f := range self.local { - if f.info.IsDir() && !self.existsRemote(f) { - files = append(files, f) - } - } - - return files -} - -func (self *syncFiles) filterMissingLocalDirs() []*remoteFile { - var files []*remoteFile - - for _, rf := range self.remote { - if isDir(rf.file) && !self.existsLocal(rf) { - files = append(files, rf) - } - } - - return files -} - -func (self *syncFiles) filterMissingRemoteFiles() []*localFile { - var files []*localFile - - for _, f := range self.local { - if !f.info.IsDir() && !self.existsRemote(f) { - files = append(files, f) - } - } - - return files -} - -func (self *syncFiles) filterMissingLocalFiles() []*remoteFile { - var files []*remoteFile - - for _, rf := range self.remote { - if !isDir(rf.file) && !self.existsLocal(rf) { - files = append(files, rf) - } - } - - return files -} - -func (self *syncFiles) filterChangedLocalFiles() []*changedFile { - var files []*changedFile - - for _, lf := range self.local { - // Skip directories - if lf.info.IsDir() { - continue - } - - // Skip files that don't exist on drive - rf, found := self.findRemoteByPath(lf.relPath) - if !found { - continue - } - - // Add files where remote md5 sum does not match local - if rf.file.Md5Checksum != md5sum(lf.absPath) { - files = append(files, &changedFile{ - local: lf, - remote: rf, - }) - } - } - - return files -} - -func (self *syncFiles) filterChangedRemoteFiles() []*changedFile { - var files []*changedFile - - for _, rf := range self.remote { - // Skip directories - if isDir(rf.file) { - continue - } - - // Skip local files that don't exist - lf, found := self.findLocalByPath(rf.relPath) - if !found { - continue - } - - // Add files where remote md5 sum does not match local - if rf.file.Md5Checksum != md5sum(lf.absPath) { - files = append(files, &changedFile{ - local: lf, - remote: rf, - }) - } - } - - return files -} - -func (self *syncFiles) filterExtraneousRemoteFiles() []*remoteFile { - var files []*remoteFile - - for _, rf := range self.remote { - if !self.existsLocal(rf) { - files = append(files, rf) - } - } - - return files -} - -func (self *syncFiles) filterExtraneousLocalFiles() []*localFile { - var files []*localFile - - for _, lf := range self.local { - if !self.existsRemote(lf) { - files = append(files, lf) - } - } - - return files -} - -func (self *syncFiles) existsRemote(lf *localFile) bool { - _, found := 
self.findRemoteByPath(lf.relPath) - return found -} - -func (self *syncFiles) existsLocal(rf *remoteFile) bool { - _, found := self.findLocalByPath(rf.relPath) - return found -} - -func (self *syncFiles) findRemoteByPath(relPath string) (*remoteFile, bool) { - if relPath == "." { - return self.root, true - } - - for _, rf := range self.remote { - if relPath == rf.relPath { - return rf, true - } - } - - return nil, false -} - -func (self *syncFiles) findLocalByPath(relPath string) (*localFile, bool) { - for _, lf := range self.local { - if relPath == lf.relPath { - return lf, true - } - } - - return nil, false -} - -type byLocalPathLength []*localFile - -func (self byLocalPathLength) Len() int { - return len(self) -} - -func (self byLocalPathLength) Swap(i, j int) { - self[i], self[j] = self[j], self[i] -} - -func (self byLocalPathLength) Less(i, j int) bool { - return pathLength(self[i].relPath) < pathLength(self[j].relPath) -} - -type byRemotePathLength []*remoteFile - -func (self byRemotePathLength) Len() int { - return len(self) -} - -func (self byRemotePathLength) Swap(i, j int) { - self[i], self[j] = self[j], self[i] -} - -func (self byRemotePathLength) Less(i, j int) bool { - return pathLength(self[i].relPath) < pathLength(self[j].relPath) -} From 4c317d16b6152bdd7972f91bf7ccb9ead8900c09 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 2 Feb 2016 22:57:48 +0100 Subject: [PATCH 072/195] Close files --- drive/upload.go | 6 ++++++ drive/upload_sync.go | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/drive/upload.go b/drive/upload.go index 1e145f72..3a1a34c4 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -59,6 +59,9 @@ func (self *Drive) uploadDirectory(args UploadArgs) error { return err } + // Close file on function exit + defer srcFile.Close() + // Make directory on drive f, err := self.mkdir(MkdirArgs{ Out: args.Out, @@ -98,6 +101,9 @@ func (self *Drive) uploadFile(args UploadArgs) (*drive.File, error) { return nil, err } + // Close file on function exit + defer srcFile.Close() + // Instantiate empty drive file dstFile := &drive.File{} diff --git a/drive/upload_sync.go b/drive/upload_sync.go index a2a06404..66e14566 100644 --- a/drive/upload_sync.go +++ b/drive/upload_sync.go @@ -227,6 +227,9 @@ func (self *Drive) uploadMissingFile(parentId string, lf *localFile, args Upload return fmt.Errorf("Failed to open file: %s", err) } + // Close file on function exit + defer srcFile.Close() + // Instantiate drive file dstFile := &drive.File{ Name: lf.info.Name(), @@ -254,6 +257,9 @@ func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs) error return fmt.Errorf("Failed to open file: %s", err) } + // Close file on function exit + defer srcFile.Close() + // Instantiate drive file dstFile := &drive.File{} From 926a805a153bd99e113543b0a6d43807eb3e49f3 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 2 Feb 2016 23:04:51 +0100 Subject: [PATCH 073/195] Use same short var name as elsewhere --- drive/sync.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drive/sync.go b/drive/sync.go index 7054ab93..a2c07470 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -192,9 +192,9 @@ type syncFiles struct { func (self *syncFiles) filterMissingRemoteDirs() []*localFile { var files []*localFile - for _, f := range self.local { - if f.info.IsDir() && !self.existsRemote(f) { - files = append(files, f) + for _, lf := range self.local { + if lf.info.IsDir() && !self.existsRemote(lf) { + files = append(files, lf) } } @@ -216,9 
+216,9 @@ func (self *syncFiles) filterMissingLocalDirs() []*remoteFile { func (self *syncFiles) filterMissingRemoteFiles() []*localFile { var files []*localFile - for _, f := range self.local { - if !f.info.IsDir() && !self.existsRemote(f) { - files = append(files, f) + for _, lf := range self.local { + if !lf.info.IsDir() && !self.existsRemote(lf) { + files = append(files, lf) } } From 1a83dbc57e1bd7481f6c716e7705a9712a726008 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 2 Feb 2016 23:07:35 +0100 Subject: [PATCH 074/195] Remove 'aborting' suffix --- drive/sync.go | 6 +++--- drive/upload_sync.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drive/sync.go b/drive/sync.go index a2c07470..7e809dc8 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -100,7 +100,7 @@ func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*remoteFile, error for _, f := range fileList.Files { relPath, ok := relPaths[f.Id] if !ok { - return nil, fmt.Errorf("File %s does not have a valid parent, aborting...", f.Id) + return nil, fmt.Errorf("File %s does not have a valid parent", f.Id) } remoteFiles = append(remoteFiles, &remoteFile{ relPath: relPath, @@ -153,13 +153,13 @@ func checkFiles(files []*drive.File) error { for _, f := range files { // Ensure all files have exactly one parent if len(f.Parents) != 1 { - return fmt.Errorf("File %s does not have exacly one parent, aborting...", f.Id) + return fmt.Errorf("File %s does not have exactly one parent", f.Id) } // Ensure that there are no duplicate files uniqKey := f.Name + f.Parents[0] if dupeId, isDupe := uniq[uniqKey]; isDupe { - return fmt.Errorf("Found name collision between %s and %s, aborting", f.Id, dupeId) + return fmt.Errorf("Found name collision between %s and %s", f.Id, dupeId) } uniq[uniqKey] = f.Id } diff --git a/drive/upload_sync.go b/drive/upload_sync.go index 66e14566..d1c155de 100644 --- a/drive/upload_sync.go +++ b/drive/upload_sync.go @@ -129,7 +129,7 @@ func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs parentPath := parentFilePath(lf.relPath) parent, ok := files.findRemoteByPath(parentPath) if !ok { - return nil, fmt.Errorf("Could not find remote directory with path '%s', aborting...", parentPath) + return nil, fmt.Errorf("Could not find remote directory with path '%s'", parentPath) } dstFile := &drive.File{ @@ -167,7 +167,7 @@ func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) err parentPath := parentFilePath(lf.relPath) parent, ok := files.findRemoteByPath(parentPath) if !ok { - return fmt.Errorf("Could not find remote directory with path '%s', aborting...", parentPath) + return fmt.Errorf("Could not find remote directory with path '%s'", parentPath) } fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i + 1, missingCount, lf.absPath, filepath.Join(files.root.file.Name, lf.relPath)) From 1131605c14244c575991999051649f04f2e9f8bd Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 3 Feb 2016 20:24:38 +0100 Subject: [PATCH 075/195] Rename command to avoid overlap, change order --- gdrive.go | 404 +++++++++++++++++++++++++++--------------------------- 1 file changed, 202 insertions(+), 202 deletions(-) diff --git a/gdrive.go b/gdrive.go index 19cbdf0e..ba8e53ac 100644 --- a/gdrive.go +++ b/gdrive.go @@ -75,74 +75,6 @@ func main() { }, }, }, - &cli.Handler{ - Pattern: "[global] list changes [options]", - Description: "List file changes", - Callback: listChangesHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options":
[]cli.Flag{ - cli.IntFlag{ - Name: "maxChanges", - Patterns: []string{"-m", "--max"}, - Description: fmt.Sprintf("Max changes to list, default: %d", DefaultMaxChanges), - DefaultValue: DefaultMaxChanges, - }, - cli.StringFlag{ - Name: "pageToken", - Patterns: []string{"--since"}, - Description: fmt.Sprintf("Page token to start listing changes from"), - DefaultValue: "1", - }, - cli.BoolFlag{ - Name: "now", - Patterns: []string{"--now"}, - Description: fmt.Sprintf("Get latest page token"), - OmitValue: true, - }, - cli.IntFlag{ - Name: "nameWidth", - Patterns: []string{"--name-width"}, - Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), - DefaultValue: DefaultNameWidth, - }, - cli.BoolFlag{ - Name: "skipHeader", - Patterns: []string{"--no-header"}, - Description: "Dont print the header", - OmitValue: true, - }, - }, - }, - }, - &cli.Handler{ - Pattern: "[global] list revisions [options] ", - Description: "List file revisions", - Callback: listRevisionsHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ - cli.IntFlag{ - Name: "nameWidth", - Patterns: []string{"--name-width"}, - Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), - DefaultValue: DefaultNameWidth, - }, - cli.BoolFlag{ - Name: "skipHeader", - Patterns: []string{"--no-header"}, - Description: "Dont print the header", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "sizeInBytes", - Patterns: []string{"--bytes"}, - Description: "Size in bytes", - OmitValue: true, - }, - }, - }, - }, &cli.Handler{ Pattern: "[global] download [options] ", Description: "Download file or directory", @@ -182,56 +114,6 @@ func main() { }, }, }, - &cli.Handler{ - Pattern: "[global] download sync [options] ", - Description: "Sync drive directory to local directory", - Callback: downloadSyncHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "deleteExtraneous", - Patterns: []string{"--delete-extraneous"}, - Description: "Delete extraneous local files", - OmitValue: true, - }, - }, - }, - }, - &cli.Handler{ - Pattern: "[global] download revision [options] ", - Description: "Download revision", - Callback: downloadRevisionHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ - cli.BoolFlag{ - Name: "force", - Patterns: []string{"-f", "--force"}, - Description: "Overwrite existing file", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "stdout", - Patterns: []string{"--stdout"}, - Description: "Write file content to stdout", - OmitValue: true, - }, - }, - }, - }, &cli.Handler{ Pattern: "[global] upload [options] ", Description: "Upload file or directory", @@ -282,7 +164,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] upload stdin [options] ", + Pattern: "[global] upload - [options] ", Description: "Upload file from stdin", Callback: uploadStdinHandler, Flags: cli.Flags{ @@ -313,34 +195,6 @@ func main() { }, }, }, - &cli.Handler{ - Pattern: "[global] upload sync [options] ", - Description: "Sync local directory to drive", - Callback: uploadSyncHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ - cli.BoolFlag{ - Name: "noProgress", - 
Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "deleteExtraneous", - Patterns: []string{"--delete-extraneous"}, - Description: "Delete extraneous remote files", - OmitValue: true, - }, - cli.IntFlag{ - Name: "chunksize", - Patterns: []string{"--chunksize"}, - Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), - DefaultValue: DefaultUploadChunkSize, - }, - }, - }, - }, &cli.Handler{ Pattern: "[global] update [options] ", Description: "Update file, this creates a new revision of the file", @@ -406,60 +260,6 @@ func main() { }, }, }, - &cli.Handler{ - Pattern: "[global] import [options] ", - Description: "Upload and convert file to a google document, see 'about import' for available conversions", - Callback: importHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ - cli.StringSliceFlag{ - Name: "parent", - Patterns: []string{"-p", "--parent"}, - Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", - }, - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "share", - Patterns: []string{"--share"}, - Description: "Share file", - OmitValue: true, - }, - }, - }, - }, - &cli.Handler{ - Pattern: "[global] export [options] ", - Description: "Export a google document", - Callback: exportHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ - cli.BoolFlag{ - Name: "force", - Patterns: []string{"-f", "--force"}, - Description: "Overwrite existing file", - OmitValue: true, - }, - cli.StringFlag{ - Name: "mime", - Patterns: []string{"--mime"}, - Description: "Mime type of exported file", - }, - cli.BoolFlag{ - Name: "printMimes", - Patterns: []string{"--print-mimes"}, - Description: "Print available mime types for given file", - OmitValue: true, - }, - }, - }, - }, &cli.Handler{ Pattern: "[global] mkdir [options] ", Description: "Create directory", @@ -529,13 +329,213 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] delete revision ", + Pattern: "[global] sync download [options] ", + Description: "Sync drive directory to local directory", + Callback: downloadSyncHandler, + Flags: cli.Flags{ + "global": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "deleteExtraneous", + Patterns: []string{"--delete-extraneous"}, + Description: "Delete extraneous local files", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global] sync upload [options] ", + Description: "Sync local directory to drive", + Callback: uploadSyncHandler, + Flags: cli.Flags{ + "global": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "deleteExtraneous", + Patterns: []string{"--delete-extraneous"}, + Description: "Delete extraneous remote files", + OmitValue: true, + }, + cli.IntFlag{ + Name: "chunksize", + Patterns: []string{"--chunksize"}, + Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), + DefaultValue: DefaultUploadChunkSize, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global] changes [options]", + Description: "List file changes", + Callback: 
listChangesHandler, + Flags: cli.Flags{ + "global": globalFlags, + "options": []cli.Flag{ + cli.IntFlag{ + Name: "maxChanges", + Patterns: []string{"-m", "--max"}, + Description: fmt.Sprintf("Max changes to list, default: %d", DefaultMaxChanges), + DefaultValue: DefaultMaxChanges, + }, + cli.StringFlag{ + Name: "pageToken", + Patterns: []string{"--since"}, + Description: fmt.Sprintf("Page token to start listing changes from"), + DefaultValue: "1", + }, + cli.BoolFlag{ + Name: "now", + Patterns: []string{"--now"}, + Description: fmt.Sprintf("Get latest page token"), + OmitValue: true, + }, + cli.IntFlag{ + Name: "nameWidth", + Patterns: []string{"--name-width"}, + Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), + DefaultValue: DefaultNameWidth, + }, + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--no-header"}, + Description: "Dont print the header", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global] revision list [options] ", + Description: "List file revisions", + Callback: listRevisionsHandler, + Flags: cli.Flags{ + "global": globalFlags, + "options": []cli.Flag{ + cli.IntFlag{ + Name: "nameWidth", + Patterns: []string{"--name-width"}, + Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), + DefaultValue: DefaultNameWidth, + }, + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--no-header"}, + Description: "Dont print the header", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Size in bytes", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global] revision download [options] ", + Description: "Download revision", + Callback: downloadRevisionHandler, + Flags: cli.Flags{ + "global": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "force", + Patterns: []string{"-f", "--force"}, + Description: "Overwrite existing file", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "stdout", + Patterns: []string{"--stdout"}, + Description: "Write file content to stdout", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global] revision delete ", Description: "Delete file revision", Callback: deleteRevisionHandler, Flags: cli.Flags{ "global": globalFlags, }, }, + &cli.Handler{ + Pattern: "[global] import [options] ", + Description: "Upload and convert file to a google document, see 'about import' for available conversions", + Callback: importHandler, + Flags: cli.Flags{ + "global": globalFlags, + "options": []cli.Flag{ + cli.StringSliceFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "share", + Patterns: []string{"--share"}, + Description: "Share file", + OmitValue: true, + }, + }, + }, + }, + &cli.Handler{ + Pattern: "[global] export [options] ", + Description: "Export a google document", + Callback: exportHandler, + Flags: cli.Flags{ + "global": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "force", + Patterns: []string{"-f", "--force"}, + Description: "Overwrite existing 
file", + OmitValue: true, + }, + cli.StringFlag{ + Name: "mime", + Patterns: []string{"--mime"}, + Description: "Mime type of exported file", + }, + cli.BoolFlag{ + Name: "printMimes", + Patterns: []string{"--print-mimes"}, + Description: "Print available mime types for given file", + OmitValue: true, + }, + }, + }, + }, &cli.Handler{ Pattern: "[global] about [options]", Description: "Google drive metadata, quota usage", From 6d0a23e4bdf5ffa36cecb2cb85a6291f9235f619 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 3 Feb 2016 20:33:43 +0100 Subject: [PATCH 076/195] Rename files --- drive/{download_sync.go => sync_download.go} | 0 drive/{upload_sync.go => sync_upload.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename drive/{download_sync.go => sync_download.go} (100%) rename drive/{upload_sync.go => sync_upload.go} (100%) diff --git a/drive/download_sync.go b/drive/sync_download.go similarity index 100% rename from drive/download_sync.go rename to drive/sync_download.go diff --git a/drive/upload_sync.go b/drive/sync_upload.go similarity index 100% rename from drive/upload_sync.go rename to drive/sync_upload.go From 5f1972a0d689b9965fc3fb9f5b9a5f9487fb0f46 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 3 Feb 2016 20:52:51 +0100 Subject: [PATCH 077/195] Add dry-run flag for syncing --- drive/sync_download.go | 21 +++++++++++++++++++++ drive/sync_upload.go | 39 +++++++++++++++++++++++++++++++-------- gdrive.go | 12 ++++++++++++ handlers_drive.go | 2 ++ 4 files changed, 66 insertions(+), 8 deletions(-) diff --git a/drive/sync_download.go b/drive/sync_download.go index 37c78cb6..ba86aaee 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -16,6 +16,7 @@ type DownloadSyncArgs struct { Progress io.Writer RootId string Path string + DryRun bool DeleteExtraneous bool } @@ -104,6 +105,11 @@ func (self *Drive) createMissingLocalDirs(files *syncFiles, args DownloadSyncArg return nil, fmt.Errorf("Failed to determine local absolute path: %s", err) } fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory: %s\n", i + 1, missingCount, path) + + if args.DryRun { + continue + } + mkdir(path) } @@ -125,6 +131,11 @@ func (self *Drive) downloadMissingFiles(files *syncFiles, args DownloadSyncArgs) return fmt.Errorf("Failed to determine local absolute path: %s", err) } fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, missingCount, remotePath, localPath) + + if args.DryRun { + continue + } + err = self.downloadRemoteFile(rf.file.Id, localPath, args) if err != nil { return err @@ -149,6 +160,11 @@ func (self *Drive) downloadChangedFiles(files *syncFiles, args DownloadSyncArgs) return fmt.Errorf("Failed to determine local absolute path: %s", err) } fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, changedCount, remotePath, localPath) + + if args.DryRun { + continue + } + err = self.downloadRemoteFile(cf.remote.file.Id, localPath, args) if err != nil { return err @@ -206,6 +222,11 @@ func (self *Drive) deleteExtraneousLocalFiles(files *syncFiles, args DownloadSyn for i, lf := range extraneousFiles { fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, lf.absPath) + + if args.DryRun { + continue + } + err := os.Remove(lf.absPath) if err != nil { return fmt.Errorf("Failed to delete local file: %s", err) diff --git a/drive/sync_upload.go b/drive/sync_upload.go index d1c155de..151a9f83 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -16,6 +16,7 @@ type UploadSyncArgs struct { Progress io.Writer Path 
string RootId string + DryRun bool DeleteExtraneous bool ChunkSize int64 } @@ -141,15 +142,22 @@ func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory: %s\n", i + 1, missingCount, filepath.Join(files.root.file.Name, lf.relPath)) - f, err := self.service.Files.Create(dstFile).Do() - if err != nil { - return nil, fmt.Errorf("Failed to create directory: %s", err) + if args.DryRun { + files.remote = append(files.remote, &remoteFile{ + relPath: lf.relPath, + file: dstFile, + }) + } else { + f, err := self.service.Files.Create(dstFile).Do() + if err != nil { + return nil, fmt.Errorf("Failed to create directory: %s", err) + } + + files.remote = append(files.remote, &remoteFile{ + relPath: lf.relPath, + file: f, + }) } - - files.remote = append(files.remote, &remoteFile{ - relPath: lf.relPath, - file: f, - }) } return files, nil @@ -171,6 +179,11 @@ func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) err } fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i + 1, missingCount, lf.absPath, filepath.Join(files.root.file.Name, lf.relPath)) + + if args.DryRun { + continue + } + err := self.uploadMissingFile(parent.file.Id, lf, args) if err != nil { return err @@ -190,6 +203,11 @@ func (self *Drive) updateChangedFiles(files *syncFiles, args UploadSyncArgs) err for i, cf := range changedFiles { fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i + 1, changedCount, cf.local.absPath, filepath.Join(files.root.file.Name, cf.local.relPath)) + + if args.DryRun { + continue + } + err := self.updateChangedFile(cf, args) if err != nil { return err @@ -212,6 +230,11 @@ func (self *Drive) deleteExtraneousRemoteFiles(files *syncFiles, args UploadSync for i, rf := range extraneousFiles { fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, filepath.Join(files.root.file.Name, rf.relPath)) + + if args.DryRun { + continue + } + err := self.deleteRemoteFile(rf, args) if err != nil { return err diff --git a/gdrive.go b/gdrive.go index ba8e53ac..67147481 100644 --- a/gdrive.go +++ b/gdrive.go @@ -341,6 +341,12 @@ func main() { Description: "Hide progress", OmitValue: true, }, + cli.BoolFlag{ + Name: "dryRun", + Patterns: []string{"--dry-run"}, + Description: "Show what would have been transferred", + OmitValue: true, + }, cli.BoolFlag{ Name: "deleteExtraneous", Patterns: []string{"--delete-extraneous"}, @@ -357,6 +363,12 @@ func main() { Flags: cli.Flags{ "global": globalFlags, "options": []cli.Flag{ + cli.BoolFlag{ + Name: "dryRun", + Patterns: []string{"--dry-run"}, + Description: "Show what would have been transferred", + OmitValue: true, + }, cli.BoolFlag{ Name: "noProgress", Patterns: []string{"--no-progress"}, diff --git a/handlers_drive.go b/handlers_drive.go index 2b6d61e2..8e7540c7 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -63,6 +63,7 @@ func downloadSyncHandler(ctx cli.Context) { Progress: progressWriter(args.Bool("noProgress")), Path: args.String("path"), RootId: args.String("id"), + DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), }) checkErr(err) @@ -118,6 +119,7 @@ func uploadSyncHandler(ctx cli.Context) { Progress: progressWriter(args.Bool("noProgress")), Path: args.String("path"), RootId: args.String("id"), + DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), ChunkSize: args.Int64("chunksize"), }) From 658985444c127d43e17fb760b38174aa87d88258 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: 
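Each sync phase applies the new flag with the same guard: print the planned operation, then skip the side effect when a dry run was requested. A minimal sketch of that pattern with a hypothetical action type (not code from the patch):

```go
package main

import "fmt"

// action stands in for one planned transfer or deletion.
type action struct{ desc string }

// applyAll mirrors the guard added to each sync phase above: the
// "[%04d/%04d]" progress line is always printed, but the actual
// work is skipped when dryRun is set.
func applyAll(actions []action, dryRun bool) {
	total := len(actions)
	for i, a := range actions {
		fmt.Printf("[%04d/%04d] %s\n", i+1, total, a.desc)
		if dryRun {
			continue
		}
		// ...perform the real download, upload or delete here...
	}
}

func main() {
	applyAll([]action{{"Downloading a.txt"}, {"Deleting b.txt"}}, true)
}
```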
Wed, 3 Feb 2016 20:55:12 +0100 Subject: [PATCH 078/195] Files are not mutated, no need to return --- drive/sync_download.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drive/sync_download.go b/drive/sync_download.go index ba86aaee..7da16bdf 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -39,7 +39,7 @@ func (self *Drive) DownloadSync(args DownloadSyncArgs) error { fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) // Create missing directories - files, err = self.createMissingLocalDirs(files, args) + err = self.createMissingLocalDirs(files, args) if err != nil { return err } @@ -88,7 +88,7 @@ func (self *Drive) getSyncRoot(rootId string) (*drive.File, error) { return f, nil } -func (self *Drive) createMissingLocalDirs(files *syncFiles, args DownloadSyncArgs) (*syncFiles, error) { +func (self *Drive) createMissingLocalDirs(files *syncFiles, args DownloadSyncArgs) error { missingDirs := files.filterMissingLocalDirs() missingCount := len(missingDirs) @@ -102,7 +102,7 @@ func (self *Drive) createMissingLocalDirs(files *syncFiles, args DownloadSyncArg for i, rf := range missingDirs { path, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) if err != nil { - return nil, fmt.Errorf("Failed to determine local absolute path: %s", err) + return fmt.Errorf("Failed to determine local absolute path: %s", err) } fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory: %s\n", i + 1, missingCount, path) @@ -113,7 +113,7 @@ func (self *Drive) createMissingLocalDirs(files *syncFiles, args DownloadSyncArg mkdir(path) } - return files, nil + return nil } func (self *Drive) downloadMissingFiles(files *syncFiles, args DownloadSyncArgs) error { From a8c5402742a2c3c1b79febda5dac3897bc98ba6b Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 3 Feb 2016 21:39:18 +0100 Subject: [PATCH 079/195] .gdriveignore support --- drive/sync.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drive/sync.go b/drive/sync.go index 7e809dc8..7b6c344c 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -5,9 +5,12 @@ import ( "os" "path/filepath" "github.com/gyuho/goraph/graph" + "github.com/sabhiram/go-git-ignore" "google.golang.org/api/drive/v3" ) +const DefaultIgnoreFile = ".gdriveignore" + func (self *Drive) prepareSyncFiles(localPath string, root *drive.File) (*syncFiles, error) { localCh := make(chan struct{files []*localFile; err error}) remoteCh := make(chan struct{files []*remoteFile; err error}) @@ -48,6 +51,12 @@ func prepareLocalFiles(root string) ([]*localFile, error) { return nil, err } + // Skip file if it is ignored by ignore file + shouldIgnore, err := prepareIgnorer(filepath.Join(absRootPath, DefaultIgnoreFile)) + if err != nil { + return nil, err + } + err = filepath.Walk(absRootPath, func(absPath string, info os.FileInfo, err error) error { if err != nil { return err @@ -63,6 +72,10 @@ func prepareLocalFiles(root string) ([]*localFile, error) { return err } + if shouldIgnore(relPath) { + return nil + } + files = append(files, &localFile{ absPath: absPath, relPath: relPath, @@ -376,3 +389,22 @@ func (self byRemotePathLength) Swap(i, j int) { func (self byRemotePathLength) Less(i, j int) bool { return pathLength(self[i].relPath) < pathLength(self[j].relPath) } + +type ignoreFunc func(string) bool + +func prepareIgnorer(path string) (ignoreFunc, error) { + acceptAll := func(string) bool { + return false + } + + if !fileExists(path) { + return acceptAll, nil + } + + 
ignorer, err := ignore.CompileIgnoreFile(path) + if err != nil { + return acceptAll, fmt.Errorf("Failed to prepare ignorer: %s", err) + } + + return ignorer.MatchesPath, nil +} From f43248e57561cff30111340e19543acc76148685 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 3 Feb 2016 21:41:28 +0100 Subject: [PATCH 080/195] Comments --- drive/sync.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drive/sync.go b/drive/sync.go index 7b6c344c..7b039e21 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -51,7 +51,7 @@ func prepareLocalFiles(root string) ([]*localFile, error) { return nil, err } - // Skip file if it is ignored by ignore file + // Prepare ignorer shouldIgnore, err := prepareIgnorer(filepath.Join(absRootPath, DefaultIgnoreFile)) if err != nil { return nil, err @@ -67,11 +67,13 @@ func prepareLocalFiles(root string) ([]*localFile, error) { return nil } + // Get relative path from root relPath, err := filepath.Rel(absRootPath, absPath) if err != nil { return err } + // Skip file if it is ignored by ignore file if shouldIgnore(relPath) { return nil } From dd73d460bc8fde4bbe493bf4a491ba1ab250a652 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 3 Feb 2016 21:55:09 +0100 Subject: [PATCH 081/195] Remove debug print --- cli/handler.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cli/handler.go b/cli/handler.go index a4aafef2..d1d9d6b7 100644 --- a/cli/handler.go +++ b/cli/handler.go @@ -1,7 +1,6 @@ package cli import ( - "fmt" "regexp" "strings" ) @@ -77,7 +76,6 @@ func Handle(args []string) bool { } _, data := h.getParser().Capture(args) - fmt.Println(data) ctx := Context{ args: data, handlers: handlers, From 803981c1078d04d39bc9d30c197f0c7b02c51a9e Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 02:12:13 +0100 Subject: [PATCH 082/195] List all files matching query --- drive/sync.go | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/drive/sync.go b/drive/sync.go index 7b039e21..0ff4cd99 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -6,7 +6,9 @@ import ( "path/filepath" "github.com/gyuho/goraph/graph" "github.com/sabhiram/go-git-ignore" + "golang.org/x/net/context" "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" ) const DefaultIgnoreFile = ".gdriveignore" @@ -94,25 +96,37 @@ func prepareLocalFiles(root string) ([]*localFile, error) { return files, err } +func (self *Drive) listAllFiles(q string, fields []googleapi.Field) ([]*drive.File, error) { + var files []*drive.File + + err := self.service.Files.List().Q(q).Fields(fields...).PageSize(1000).Pages(context.TODO(), func(fl *drive.FileList) error { + files = append(files, fl.Files...) 
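For context, the generated Pages helper used above re-issues the list call with each NextPageToken and hands every page to the callback, which is what lets the sync code see all matches rather than only the first page. A hedged usage sketch; constructing the authenticated service is elided:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"google.golang.org/api/drive/v3"
	"google.golang.org/api/googleapi"
)

// countMatches pages through every result for a query, mirroring
// listAllFiles: request only needed fields, 1000 files per page.
func countMatches(service *drive.Service, query string) (int, error) {
	n := 0
	fields := []googleapi.Field{"nextPageToken", "files(id,name)"}
	err := service.Files.List().
		Q(query).
		Fields(fields...).
		PageSize(1000).
		Pages(context.TODO(), func(fl *drive.FileList) error {
			n += len(fl.Files)
			// Returning a non-nil error here aborts paging early.
			return nil
		})
	return n, err
}

func main() {
	// Assumes an authenticated *drive.Service from the program's
	// auth helpers; none is constructed in this sketch.
	var service *drive.Service
	if service == nil {
		log.Fatal("no authenticated drive service configured")
	}
	if n, err := countMatches(service, "trashed = false"); err == nil {
		fmt.Println("matches:", n)
	}
}
```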
+ return nil + }) + + return files, err +} + func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*remoteFile, error) { // Find all files which has rootDir as root query := fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id) - fileList, err := self.service.Files.List().Q(query).Fields("files(id,name,parents,md5Checksum,mimeType)").Do() + fields := []googleapi.Field{"nextPageToken", "files(id,name,parents,md5Checksum,mimeType)"} + files, err := self.listAllFiles(query, fields) if err != nil { return nil, fmt.Errorf("Failed listing files: %s", err) } - if err := checkFiles(fileList.Files); err != nil { + if err := checkFiles(files); err != nil { return nil, err } - relPaths, err := prepareRemoteRelPaths(rootDir.Id, fileList.Files) + relPaths, err := prepareRemoteRelPaths(rootDir, files) if err != nil { return nil, err } var remoteFiles []*remoteFile - for _, f := range fileList.Files { + for _, f := range files { relPath, ok := relPaths[f.Id] if !ok { return nil, fmt.Errorf("File %s does not have a valid parent", f.Id) From 0e39bcd0f24f0c3241e39f20388a7f268af03814 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 02:14:20 +0100 Subject: [PATCH 083/195] Use BreadthFirstPath to find relative path between root and file Finding the shortest path was slow and unnecessary --- drive/sync.go | 71 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 50 insertions(+), 21 deletions(-) diff --git a/drive/sync.go b/drive/sync.go index 0ff4cd99..a0d90a2a 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -4,7 +4,7 @@ import ( "fmt" "os" "path/filepath" - "github.com/gyuho/goraph/graph" + "github.com/soniakeys/graph" "github.com/sabhiram/go-git-ignore" "golang.org/x/net/context" "google.golang.org/api/drive/v3" @@ -140,36 +140,65 @@ func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*remoteFile, error return remoteFiles, nil } -func prepareRemoteRelPaths(rootId string, files []*drive.File) (map[string]string, error) { - names := map[string]string{} - idGraph := graph.NewDefaultGraph() +func prepareRemoteRelPaths(root *drive.File, files []*drive.File) (map[string]string, error) { + // The graph will only hold integer values so we use + // maps to lookup file by index and index by file id + indexLookup := map[string]graph.NI{} + fileLookup := map[graph.NI]*drive.File{} - for _, f := range files { - // Store directory name for quick lookup - names[f.Id] = f.Name + // All files includes root dir + allFiles := append([]*drive.File{root}, files...) 
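The intuition behind the change: each remote file records a single parent, so a relative path can be recovered by following parent links toward the sync root and reversing the collected names. A dependency-free sketch of that idea, with an illustrative node type rather than the drive.File API:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// node is an illustrative stand-in for drive.File: an id, a display
// name, and a single parent id.
type node struct {
	id, name, parent string
}

// relPath walks parent links from id up to rootId, collecting names
// in reverse; it assumes the links form a tree rooted at rootId.
func relPath(rootId string, byId map[string]node, id string) (string, error) {
	var names []string
	for cur := id; cur != rootId; {
		n, ok := byId[cur]
		if !ok {
			return "", fmt.Errorf("could not find parent of %s", cur)
		}
		names = append([]string{n.name}, names...)
		cur = n.parent
	}
	return filepath.Join(names...), nil
}

func main() {
	byId := map[string]node{
		"b": {id: "b", name: "docs", parent: "root"},
		"c": {id: "c", name: "notes.txt", parent: "b"},
	}
	p, _ := relPath("root", byId, "c")
	fmt.Println(p) // docs/notes.txt
}
```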
- // Store path between parent and child folder - idGraph.AddVertex(f.Id) - idGraph.AddVertex(f.Parents[0]) - idGraph.AddEdge(f.Parents[0], f.Id, 0) + // Prepare lookup maps + for i, f := range allFiles { + indexLookup[f.Id] = graph.NI(i) + fileLookup[graph.NI(i)] = f } + // Graph will hold relationship between parent and file + g := &graph.AdjacencyList{} + + // Add relationship between parent and file for all files to graph + for i, f := range allFiles { + if f == root { + continue + } + + // Lookup index of parent + parentIdx, found := indexLookup[f.Parents[0]] + if !found { + return nil, fmt.Errorf("Could not find parent of %s (%s)", f.Id, f.Name) + } + + g.AddEdge(graph.NI(parentIdx), graph.NI(i)) + } + + // This will hold a map of file id => relative path paths := map[string]string{} - for _, f := range files { - // Find path from root to directory - pathIds, _, err := graph.Dijkstra(idGraph, rootId, f.Id) - if err != nil { - return nil, err + // Find relative path from root for all files + for _, f := range allFiles { + if f == root { + continue } - // Convert path ids to path names - var pathNames []string - for _, id := range pathIds { - pathNames = append(pathNames, names[id]) + // Find nodes between root and file + nodes := g.BreadthFirstPath(0, indexLookup[f.Id]) + + // This will hold the name of each path component between root and + // the file (excluding root and including the file itself) + pathNames := []string{} + + // Lookup file for each node and grab name + for _, n := range nodes { + file := fileLookup[n] + if file == root { + continue + } + pathNames = append(pathNames, file.Name) } - // Store relative file path from root to directory + // Join path names to form relative path and add to map paths[f.Id] = filepath.Join(pathNames...) } From 9b9160c4ed38ac8f1e564fa2dcea0c728af75c6f Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 09:46:41 +0100 Subject: [PATCH 084/195] Skip files that are not a directory or regular file --- drive/sync.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drive/sync.go b/drive/sync.go index a0d90a2a..c8396c16 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -69,6 +69,11 @@ func prepareLocalFiles(root string) ([]*localFile, error) { return nil } + // Skip files that are not a directory or regular file + if !info.IsDir() && !info.Mode().IsRegular() { + return nil + } + // Get relative path from root relPath, err := filepath.Rel(absRootPath, absPath) if err != nil { From 77d8e36d48c73fe1d0dc86b6736c32e5fd1f744a Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 12:55:35 +0100 Subject: [PATCH 085/195] Update gitignore --- .gitignore | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index b0c6c0e7..ee3fac54 100644 --- a/.gitignore +++ b/.gitignore @@ -6,5 +6,4 @@ _release/bin *.un~ Session.vim .netrwhist -drive_old -foo.txt +gdrive From ce32c7536cf40afae065b09018c05da6ba221c55 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 13:08:33 +0100 Subject: [PATCH 086/195] Give FileComparer as argument --- compare.go | 27 ++++++++++ drive/sync.go | 114 ++++++++++++++++++++++++++--------------- drive/sync_download.go | 3 +- drive/sync_upload.go | 11 ++-- drive/util.go | 14 ----- handlers_drive.go | 2 + 6 files changed, 110 insertions(+), 61 deletions(-) create mode 100644 compare.go diff --git a/compare.go b/compare.go new file mode 100644 index 00000000..7275a381 --- /dev/null +++ b/compare.go @@ -0,0 +1,27 @@ +package main + +import ( + "fmt" + "os" + "io" +
"crypto/md5" + "./drive" +) + +type Md5Comparer struct {} + +func (self Md5Comparer) Changed(local *drive.LocalFile, remote *drive.RemoteFile) bool { + return remote.Md5() != md5sum(local.AbsPath()) +} + +func md5sum(path string) string { + h := md5.New() + f, err := os.Open(path) + if err != nil { + return "" + } + defer f.Close() + + io.Copy(h, f) + return fmt.Sprintf("%x", h.Sum(nil)) +} diff --git a/drive/sync.go b/drive/sync.go index c8396c16..cfd5acb7 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -1,6 +1,7 @@ package drive import ( + "time" "fmt" "os" "path/filepath" @@ -13,18 +14,18 @@ import ( const DefaultIgnoreFile = ".gdriveignore" -func (self *Drive) prepareSyncFiles(localPath string, root *drive.File) (*syncFiles, error) { - localCh := make(chan struct{files []*localFile; err error}) - remoteCh := make(chan struct{files []*remoteFile; err error}) +func (self *Drive) prepareSyncFiles(localPath string, root *drive.File, cmp FileComparer) (*syncFiles, error) { + localCh := make(chan struct{files []*LocalFile; err error}) + remoteCh := make(chan struct{files []*RemoteFile; err error}) go func() { files, err := prepareLocalFiles(localPath) - localCh <- struct{files []*localFile; err error}{files, err} + localCh <- struct{files []*LocalFile; err error}{files, err} }() go func() { files, err := self.prepareRemoteFiles(root) - remoteCh <- struct{files []*remoteFile; err error}{files, err} + remoteCh <- struct{files []*RemoteFile; err error}{files, err} }() local := <-localCh @@ -38,14 +39,15 @@ func (self *Drive) prepareSyncFiles(localPath string, root *drive.File) (*syncFi } return &syncFiles{ - root: &remoteFile{file: root}, + root: &RemoteFile{file: root}, local: local.files, remote: remote.files, + compare: cmp, }, nil } -func prepareLocalFiles(root string) ([]*localFile, error) { - var files []*localFile +func prepareLocalFiles(root string) ([]*LocalFile, error) { + var files []*LocalFile // Get absolute root path absRootPath, err := filepath.Abs(root) @@ -85,7 +87,7 @@ func prepareLocalFiles(root string) ([]*localFile, error) { return nil } - files = append(files, &localFile{ + files = append(files, &LocalFile{ absPath: absPath, relPath: relPath, info: info, @@ -112,7 +114,7 @@ func (self *Drive) listAllFiles(q string, fields []googleapi.Field) ([]*drive.Fi return files, err } -func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*remoteFile, error) { +func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*RemoteFile, error) { // Find all files which has rootDir as root query := fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id) fields := []googleapi.Field{"nextPageToken", "files(id,name,parents,md5Checksum,mimeType)"} @@ -130,13 +132,13 @@ func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*remoteFile, error return nil, err } - var remoteFiles []*remoteFile + var remoteFiles []*RemoteFile for _, f := range files { relPath, ok := relPaths[f.Id] if !ok { return nil, fmt.Errorf("File %s does not have a valid parent", f.Id) } - remoteFiles = append(remoteFiles, &remoteFile{ + remoteFiles = append(remoteFiles, &RemoteFile{ relPath: relPath, file: f, }) @@ -230,30 +232,60 @@ func checkFiles(files []*drive.File) error { return nil } -type localFile struct { +type LocalFile struct { absPath string relPath string info os.FileInfo } -type remoteFile struct { +type RemoteFile struct { relPath string file *drive.File } type changedFile struct { - local *localFile - remote *remoteFile + local *LocalFile + remote *RemoteFile } type 
syncFiles struct { - root *remoteFile - local []*localFile - remote []*remoteFile + root *RemoteFile + local []*LocalFile + remote []*RemoteFile + compare FileComparer } -func (self *syncFiles) filterMissingRemoteDirs() []*localFile { - var files []*localFile +type FileComparer interface { + Changed(*LocalFile, *RemoteFile) bool +} + +func (self LocalFile) AbsPath() string { + return self.absPath +} + +func (self LocalFile) Size() int64 { + return self.info.Size() +} + +func (self LocalFile) Modified() time.Time { + return self.info.ModTime() +} + +func (self RemoteFile) Md5() string { + return self.file.Md5Checksum +} + +func (self RemoteFile) Size() int64 { + return self.file.Size +} + +func (self RemoteFile) Modified() time.Time { + t, _ := time.Parse(time.RFC3339, self.file.ModifiedTime) + return t +} + +func (self *syncFiles) filterMissingRemoteDirs() []*LocalFile { + var files []*LocalFile for _, lf := range self.local { if lf.info.IsDir() && !self.existsRemote(lf) { @@ -264,8 +296,8 @@ func (self *syncFiles) filterMissingRemoteDirs() []*localFile { return files } -func (self *syncFiles) filterMissingLocalDirs() []*remoteFile { - var files []*remoteFile +func (self *syncFiles) filterMissingLocalDirs() []*RemoteFile { + var files []*RemoteFile for _, rf := range self.remote { if isDir(rf.file) && !self.existsLocal(rf) { @@ -276,8 +308,8 @@ func (self *syncFiles) filterMissingLocalDirs() []*remoteFile { return files } -func (self *syncFiles) filterMissingRemoteFiles() []*localFile { - var files []*localFile +func (self *syncFiles) filterMissingRemoteFiles() []*LocalFile { + var files []*LocalFile for _, lf := range self.local { if !lf.info.IsDir() && !self.existsRemote(lf) { @@ -288,8 +320,8 @@ func (self *syncFiles) filterMissingRemoteFiles() []*localFile { return files } -func (self *syncFiles) filterMissingLocalFiles() []*remoteFile { - var files []*remoteFile +func (self *syncFiles) filterMissingLocalFiles() []*RemoteFile { + var files []*RemoteFile for _, rf := range self.remote { if !isDir(rf.file) && !self.existsLocal(rf) { @@ -315,8 +347,8 @@ func (self *syncFiles) filterChangedLocalFiles() []*changedFile { continue } - // Add files where remote md5 sum does not match local - if rf.file.Md5Checksum != md5sum(lf.absPath) { + // Check if file has changed + if self.compare.Changed(lf, rf) { files = append(files, &changedFile{ local: lf, remote: rf, @@ -342,8 +374,8 @@ func (self *syncFiles) filterChangedRemoteFiles() []*changedFile { continue } - // Add files where remote md5 sum does not match local - if rf.file.Md5Checksum != md5sum(lf.absPath) { + // Check if file has changed + if self.compare.Changed(lf, rf) { files = append(files, &changedFile{ local: lf, remote: rf, @@ -354,8 +386,8 @@ func (self *syncFiles) filterChangedRemoteFiles() []*changedFile { return files } -func (self *syncFiles) filterExtraneousRemoteFiles() []*remoteFile { - var files []*remoteFile +func (self *syncFiles) filterExtraneousRemoteFiles() []*RemoteFile { + var files []*RemoteFile for _, rf := range self.remote { if !self.existsLocal(rf) { @@ -366,8 +398,8 @@ func (self *syncFiles) filterExtraneousRemoteFiles() []*remoteFile { return files } -func (self *syncFiles) filterExtraneousLocalFiles() []*localFile { - var files []*localFile +func (self *syncFiles) filterExtraneousLocalFiles() []*LocalFile { + var files []*LocalFile for _, lf := range self.local { if !self.existsRemote(lf) { @@ -378,17 +410,17 @@ func (self *syncFiles) filterExtraneousLocalFiles() []*localFile { return files } -func (self 
*syncFiles) existsRemote(lf *localFile) bool { +func (self *syncFiles) existsRemote(lf *LocalFile) bool { _, found := self.findRemoteByPath(lf.relPath) return found } -func (self *syncFiles) existsLocal(rf *remoteFile) bool { +func (self *syncFiles) existsLocal(rf *RemoteFile) bool { _, found := self.findLocalByPath(rf.relPath) return found } -func (self *syncFiles) findRemoteByPath(relPath string) (*remoteFile, bool) { +func (self *syncFiles) findRemoteByPath(relPath string) (*RemoteFile, bool) { if relPath == "." { return self.root, true } @@ -402,7 +434,7 @@ func (self *syncFiles) findRemoteByPath(relPath string) (*remoteFile, bool) { return nil, false } -func (self *syncFiles) findLocalByPath(relPath string) (*localFile, bool) { +func (self *syncFiles) findLocalByPath(relPath string) (*LocalFile, bool) { for _, lf := range self.local { if relPath == lf.relPath { return lf, true @@ -412,7 +444,7 @@ func (self *syncFiles) findLocalByPath(relPath string) (*localFile, bool) { return nil, false } -type byLocalPathLength []*localFile +type byLocalPathLength []*LocalFile func (self byLocalPathLength) Len() int { return len(self) @@ -426,7 +458,7 @@ func (self byLocalPathLength) Less(i, j int) bool { return pathLength(self[i].relPath) < pathLength(self[j].relPath) } -type byRemotePathLength []*remoteFile +type byRemotePathLength []*RemoteFile func (self byRemotePathLength) Len() int { return len(self) diff --git a/drive/sync_download.go b/drive/sync_download.go index 7da16bdf..f18cb18a 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -18,6 +18,7 @@ type DownloadSyncArgs struct { Path string DryRun bool DeleteExtraneous bool + Comparer FileComparer } func (self *Drive) DownloadSync(args DownloadSyncArgs) error { @@ -31,7 +32,7 @@ func (self *Drive) DownloadSync(args DownloadSyncArgs) error { } fmt.Fprintln(args.Out, "Collecting local and remote file information...") - files, err := self.prepareSyncFiles(args.Path, rootDir) + files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer) if err != nil { return err } diff --git a/drive/sync_upload.go b/drive/sync_upload.go index 151a9f83..445ed425 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -19,6 +19,7 @@ type UploadSyncArgs struct { DryRun bool DeleteExtraneous bool ChunkSize int64 + Comparer FileComparer } func (self *Drive) UploadSync(args UploadSyncArgs) error { @@ -36,7 +37,7 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { } fmt.Fprintln(args.Out, "Collecting local and remote file information...") - files, err := self.prepareSyncFiles(args.Path, rootDir) + files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer) if err != nil { return err } @@ -143,7 +144,7 @@ func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory: %s\n", i + 1, missingCount, filepath.Join(files.root.file.Name, lf.relPath)) if args.DryRun { - files.remote = append(files.remote, &remoteFile{ + files.remote = append(files.remote, &RemoteFile{ relPath: lf.relPath, file: dstFile, }) @@ -153,7 +154,7 @@ func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs return nil, fmt.Errorf("Failed to create directory: %s", err) } - files.remote = append(files.remote, &remoteFile{ + files.remote = append(files.remote, &RemoteFile{ relPath: lf.relPath, file: f, }) @@ -244,7 +245,7 @@ func (self *Drive) deleteExtraneousRemoteFiles(files *syncFiles, args UploadSync return nil } -func (self *Drive) 
uploadMissingFile(parentId string, lf *localFile, args UploadSyncArgs) error { +func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args UploadSyncArgs) error { srcFile, err := os.Open(lf.absPath) if err != nil { return fmt.Errorf("Failed to open file: %s", err) @@ -300,7 +301,7 @@ func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs) error return nil } -func (self *Drive) deleteRemoteFile(rf *remoteFile, args UploadSyncArgs) error { +func (self *Drive) deleteRemoteFile(rf *RemoteFile, args UploadSyncArgs) error { err := self.service.Files.Delete(rf.file.Id).Do() if err != nil { return fmt.Errorf("Failed to delete file: %s", err) diff --git a/drive/util.go b/drive/util.go index 1c430095..2116f9b3 100644 --- a/drive/util.go +++ b/drive/util.go @@ -9,8 +9,6 @@ import ( "unicode/utf8" "math" "time" - "crypto/md5" - "io" ) type kv struct { @@ -137,18 +135,6 @@ func intMax() int64 { return 1 << (strconv.IntSize - 1) - 1 } -func md5sum(path string) string { - h := md5.New() - f, err := os.Open(path) - if err != nil { - return "" - } - defer f.Close() - - io.Copy(h, f) - return fmt.Sprintf("%x", h.Sum(nil)) -} - func pathLength(path string) int { return strings.Count(path, string(os.PathSeparator)) } diff --git a/handlers_drive.go b/handlers_drive.go index 8e7540c7..029b47ee 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -65,6 +65,7 @@ func downloadSyncHandler(ctx cli.Context) { RootId: args.String("id"), DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), + Comparer: Md5Comparer{}, }) checkErr(err) } @@ -122,6 +123,7 @@ func uploadSyncHandler(ctx cli.Context) { DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), ChunkSize: args.Int64("chunksize"), + Comparer: Md5Comparer{}, }) checkErr(err) } From b7e45b080ffe13d286be65ce396da47b36a16944 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 15:07:24 +0100 Subject: [PATCH 087/195] Retry file upload on backend error --- drive/errors.go | 22 ++++++++++++++++++++++ drive/sync_upload.go | 12 +++++++++--- drive/util.go | 5 +++++ 3 files changed, 36 insertions(+), 3 deletions(-) create mode 100644 drive/errors.go diff --git a/drive/errors.go b/drive/errors.go new file mode 100644 index 00000000..703dae53 --- /dev/null +++ b/drive/errors.go @@ -0,0 +1,22 @@ +package drive + +import ( + "google.golang.org/api/googleapi" + "time" +) + +const MaxBackendErrorRetries = 5 + +func isBackendError(err error) bool { + if err == nil { + return false + } + + ae, ok := err.(*googleapi.Error) + return ok && ae.Code >= 500 && ae.Code <= 599 +} + +func exponentialBackoffSleep(try int) { + seconds := pow(2, try) + time.Sleep(time.Duration(seconds) * time.Second) +} diff --git a/drive/sync_upload.go b/drive/sync_upload.go index 445ed425..c4df1e85 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -185,7 +185,7 @@ func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) err continue } - err := self.uploadMissingFile(parent.file.Id, lf, args) + err := self.uploadMissingFile(parent.file.Id, lf, args, 0) if err != nil { return err } @@ -245,7 +245,7 @@ func (self *Drive) deleteExtraneousRemoteFiles(files *syncFiles, args UploadSync return nil } -func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args UploadSyncArgs) error { +func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args UploadSyncArgs, try int) error { srcFile, err := os.Open(lf.absPath) if err != nil { return fmt.Errorf("Failed to 
open file: %s", err) @@ -269,7 +269,13 @@ _, err = self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Media(srcReader, chunkSize).Do() if err != nil { - return fmt.Errorf("Failed to upload file: %s", err) + if isBackendError(err) && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + return self.uploadMissingFile(parentId, lf, args, try) + } else { + return fmt.Errorf("Failed to upload file: %s", err) + } } return nil diff --git a/drive/util.go b/drive/util.go index 2116f9b3..481a48e8 100644 --- a/drive/util.go +++ b/drive/util.go @@ -143,3 +143,8 @@ func parentFilePath(path string) string { dir, _ := filepath.Split(path) return filepath.Dir(dir) } + +func pow(x int, y int) int { + f := math.Pow(float64(x), float64(y)) + return int(f) +} From 69fb273d2f99604aa3fb80e88fb58f48fc1998c0 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 19:33:42 +0100 Subject: [PATCH 088/195] CachedMd5Comparer --- compare.go | 67 ++++++++++++++++++++++++++++++++++++++++------- handlers_drive.go | 8 ++++-- util.go | 32 ++++++++++++++++++++++ 3 files changed, 95 insertions(+), 12 deletions(-) diff --git a/compare.go b/compare.go index 7275a381..10cab3c0 100644 --- a/compare.go +++ b/compare.go @@ -1,27 +1,74 @@ package main import ( - "fmt" "os" - "io" - "crypto/md5" + "encoding/json" "./drive" ) +const MinCacheFileSize = 5 * 1024 * 1024 + type Md5Comparer struct {} func (self Md5Comparer) Changed(local *drive.LocalFile, remote *drive.RemoteFile) bool { return remote.Md5() != md5sum(local.AbsPath()) } -func md5sum(path string) string { - h := md5.New() +type CachedFileInfo struct { + Size int64 `json:"size"` + Modified int64 `json:"modified"` + Md5 string `json:"md5"` +} + +func NewCachedMd5Comparer(path string) CachedMd5Comparer { + cache := map[string]*CachedFileInfo{} + f, err := os.Open(path) - if err != nil { - return "" + if err == nil { + json.NewDecoder(f).Decode(&cache) + } + f.Close() + return CachedMd5Comparer{path, cache} +} + +type CachedMd5Comparer struct { + path string + cache map[string]*CachedFileInfo +} + +func (self CachedMd5Comparer) Changed(local *drive.LocalFile, remote *drive.RemoteFile) bool { + return remote.Md5() != self.md5(local) +} + +func (self CachedMd5Comparer) md5(local *drive.LocalFile) string { + // See if the file exists in the cache + cached, found := self.cache[local.AbsPath()] + + // If found and modification time and size have not changed, return cached md5 + if found && local.Modified().UnixNano() == cached.Modified && local.Size() == cached.Size { + return cached.Md5 + } + + // Calculate new md5 sum + md5 := md5sum(local.AbsPath()) + + // Cache file info if file meets size criteria + if local.Size() > MinCacheFileSize { + self.cacheAdd(local, md5) + self.persist() } - defer f.Close() - io.Copy(h, f) - return fmt.Sprintf("%x", h.Sum(nil)) + return md5 +} + +func (self CachedMd5Comparer) cacheAdd(lf *drive.LocalFile, md5 string) { + self.cache[lf.AbsPath()] = &CachedFileInfo{ + Size: lf.Size(), + Modified: lf.Modified().UnixNano(), + Md5: md5, + } +} + +func (self CachedMd5Comparer) persist() { + writeJson(self.path, self.cache) } diff --git a/handlers_drive.go b/handlers_drive.go index 029b47ee..9de27cc2 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -5,6 +5,7 @@ import ( "os" "io" "io/ioutil" + "path/filepath" "./cli" "./auth" "./drive" @@ -13,6 +14,7 @@ import ( const ClientId =
"367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com" const ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO" const TokenFilename = "token_v2.json" +const DefaultCacheFileName = "file_cache.json" func listHandler(ctx cli.Context) { @@ -58,6 +60,7 @@ func downloadHandler(ctx cli.Context) { func downloadSyncHandler(ctx cli.Context) { args := ctx.Args() + cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName) err := newDrive(args).DownloadSync(drive.DownloadSyncArgs{ Out: os.Stdout, Progress: progressWriter(args.Bool("noProgress")), @@ -65,7 +68,7 @@ func downloadSyncHandler(ctx cli.Context) { RootId: args.String("id"), DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), - Comparer: Md5Comparer{}, + Comparer: NewCachedMd5Comparer(cachePath), }) checkErr(err) } @@ -115,6 +118,7 @@ func uploadStdinHandler(ctx cli.Context) { func uploadSyncHandler(ctx cli.Context) { args := ctx.Args() + cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName) err := newDrive(args).UploadSync(drive.UploadSyncArgs{ Out: os.Stdout, Progress: progressWriter(args.Bool("noProgress")), @@ -123,7 +127,7 @@ func uploadSyncHandler(ctx cli.Context) { DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), ChunkSize: args.Int64("chunksize"), - Comparer: Md5Comparer{}, + Comparer: NewCachedMd5Comparer(cachePath), }) checkErr(err) } diff --git a/util.go b/util.go index d40d33e0..041daed0 100644 --- a/util.go +++ b/util.go @@ -4,7 +4,10 @@ import ( "runtime" "path/filepath" "fmt" + "encoding/json" "os" + "io" + "crypto/md5" ) func GetDefaultConfigDir() string { @@ -56,3 +59,32 @@ func checkErr(err error) { os.Exit(1) } } + +func writeJson(path string, data interface{}) error { + tmpFile := path + ".tmp" + f, err := os.Create(tmpFile) + if err != nil { + return err + } + + err = json.NewEncoder(f).Encode(data) + f.Close() + if err != nil { + os.Remove(tmpFile) + return err + } + + return os.Rename(tmpFile, path) +} + +func md5sum(path string) string { + h := md5.New() + f, err := os.Open(path) + if err != nil { + return "" + } + defer f.Close() + + io.Copy(h, f) + return fmt.Sprintf("%x", h.Sum(nil)) +} From d8a9719d81f50ac941a67285e29c64202056cf7e Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 21:53:34 +0100 Subject: [PATCH 089/195] Write to temp file first --- auth/token.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/auth/token.go b/auth/token.go index 926d9f68..1c1150b4 100644 --- a/auth/token.go +++ b/auth/token.go @@ -3,6 +3,7 @@ package auth import ( "golang.org/x/oauth2" "encoding/json" + "os" "io/ioutil" ) @@ -53,5 +54,15 @@ func SaveToken(path string, token *oauth2.Token) error { if err = mkdir(path); err != nil { return err } - return ioutil.WriteFile(path, data, 0600) + + // Write to temp file first + tmpFile := path + ".tmp" + err = ioutil.WriteFile(tmpFile, data, 0600) + if err != nil { + os.Remove(tmpFile) + return err + } + + // Move file to correct path + return os.Rename(tmpFile, path) } From bc5809bbdee7bcbc32a71e58964f1f3a922b3c5d Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 22:20:40 +0100 Subject: [PATCH 090/195] Require recursive flag to delete directories --- drive/delete.go | 13 +++++++++---- gdrive.go | 10 +++++++++- handlers_drive.go | 1 + 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/drive/delete.go b/drive/delete.go index dc687538..d2469e1c 100644 --- a/drive/delete.go +++ b/drive/delete.go @@ -8,19 +8,24 
@@ import ( type DeleteArgs struct { Out io.Writer Id string + Recursive bool } -func (self *Drive) Delete(args DeleteArgs) (err error) { - f, err := self.service.Files.Get(args.Id).Fields("name").Do() +func (self *Drive) Delete(args DeleteArgs) error { + f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do() if err != nil { return fmt.Errorf("Failed to get file: %s", err) } + if isDir(f) && !args.Recursive { + return fmt.Errorf("'%s' is a directory, use the 'recursive' flag to delete directories", f.Name) + } + err = self.service.Files.Delete(args.Id).Do() if err != nil { return fmt.Errorf("Failed to delete file: %s", err) } - fmt.Fprintf(args.Out, "Removed file '%s'\n", f.Name) - return + fmt.Fprintf(args.Out, "Deleted '%s'\n", f.Name) + return nil } diff --git a/gdrive.go b/gdrive.go index 67147481..0a90f8d0 100644 --- a/gdrive.go +++ b/gdrive.go @@ -321,11 +321,19 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] delete ", + Pattern: "[global] delete [options] ", Description: "Delete file or directory", Callback: deleteHandler, Flags: cli.Flags{ "global": globalFlags, + "options": []cli.Flag{ + cli.BoolFlag{ + Name: "recursive", + Patterns: []string{"-r", "--recursive"}, + Description: "Delete directory and all it's content", + OmitValue: true, + }, + }, }, }, &cli.Handler{ diff --git a/handlers_drive.go b/handlers_drive.go index 9de27cc2..714493c8 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -225,6 +225,7 @@ func deleteHandler(ctx cli.Context) { err := newDrive(args).Delete(drive.DeleteArgs{ Out: os.Stdout, Id: args.String("id"), + Recursive: args.Bool("recursive"), }) checkErr(err) } From a2bb1bb7868f5444119c17ce38102b7370411b87 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 22:28:48 +0100 Subject: [PATCH 091/195] Indicate which flags needs an argument --- handlers_meta.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/handlers_meta.go b/handlers_meta.go index 6394cb30..637036c7 100644 --- a/handlers_meta.go +++ b/handlers_meta.go @@ -39,7 +39,12 @@ func printCommandPrefixHelp(ctx cli.Context, prefix ...string) { for name, flags := range handler.Flags { fmt.Printf("\n%s:\n", name) for _, flag := range flags { - fmt.Printf(" %s (%s)\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription()) + boolFlag, isBool := flag.(cli.BoolFlag) + if isBool && boolFlag.OmitValue { + fmt.Printf(" %s (%s)\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription()) + } else { + fmt.Printf(" %s <%s> (%s)\n", strings.Join(flag.GetPatterns(), ", "), flag.GetName(), flag.GetDescription()) + } } } } From 0535cc04bc89943cd585f2f4d7f774ec1b8cc683 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 23:25:29 +0100 Subject: [PATCH 092/195] Use FlagGroup instead of map --- cli/handler.go | 32 ++++++++-- gdrive.go | 148 +++++++++++++++++++++++------------------------ handlers_meta.go | 6 +- 3 files changed, 104 insertions(+), 82 deletions(-) diff --git a/cli/handler.go b/cli/handler.go index d1d9d6b7..b196f535 100644 --- a/cli/handler.go +++ b/cli/handler.go @@ -5,13 +5,35 @@ import ( "strings" ) -type Flags map[string][]Flag +func NewFlagGroup(name string, flags...Flag) FlagGroup { + return FlagGroup{ + Name: name, + Flags: flags, + } +} + +type FlagGroup struct { + Name string + Flags []Flag +} + +type FlagGroups []FlagGroup + +func (groups FlagGroups) getFlags(name string) []Flag { + for _, group := range groups { + if group.Name == name { + return group.Flags + } + } + + return nil +} var 
handlers []*Handler type Handler struct { Pattern string - Flags Flags + FlagGroups FlagGroups Callback func(Context) Description string } @@ -22,7 +44,7 @@ func (self *Handler) getParser() Parser { for _, pattern := range self.SplitPattern() { if isOptional(pattern) { name := optionalName(pattern) - parser := getFlagParser(self.Flags[name]) + parser := getFlagParser(self.FlagGroups.getFlags(name)) parsers = append(parsers, parser) } else if isCaptureGroup(pattern) { parsers = append(parsers, CaptureGroupParser{pattern}) @@ -50,10 +72,10 @@ func SetHandlers(h []*Handler) { handlers = h } -func AddHandler(pattern string, flags Flags, callback func(Context), desc string) { +func AddHandler(pattern string, groups FlagGroups, callback func(Context), desc string) { handlers = append(handlers, &Handler{ Pattern: pattern, - Flags: flags, + FlagGroups: groups, Callback: callback, Description: desc, }) diff --git a/gdrive.go b/gdrive.go index 0a90f8d0..41e6a54a 100644 --- a/gdrive.go +++ b/gdrive.go @@ -34,9 +34,9 @@ func main() { Pattern: "[global] list [options]", Description: "List files", Callback: listHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.IntFlag{ Name: "maxFiles", Patterns: []string{"-m", "--max"}, @@ -72,16 +72,16 @@ func main() { Description: "Size in bytes", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] download [options] ", Description: "Download file or directory", Callback: downloadHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.BoolFlag{ Name: "force", Patterns: []string{"-f", "--force"}, @@ -111,16 +111,16 @@ func main() { Description: "Write file content to stdout", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] upload [options] ", Description: "Upload file or directory", Callback: uploadHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.BoolFlag{ Name: "recursive", Patterns: []string{"-r", "--recursive"}, @@ -160,16 +160,16 @@ func main() { Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), DefaultValue: DefaultUploadChunkSize, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] upload - [options] ", Description: "Upload file from stdin", Callback: uploadStdinHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.StringSliceFlag{ Name: "parent", Patterns: []string{"-p", "--parent"}, @@ -192,16 +192,16 @@ func main() { Description: "Share file", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] update [options] ", Description: "Update file, this creates a new revision of the file", Callback: updateHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.StringSliceFlag{ Name: "parent", Patterns: []string{"-p", "--parent"}, @@ -241,32 +241,32 @@ func main() { Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), DefaultValue: DefaultUploadChunkSize, }, - }, + ), }, 
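A plausible reason for moving from the Flags map to a FlagGroups slice is ordering: Go randomizes map iteration, so a slice keeps group order (and therefore help output) stable. A reduced, self-contained sketch of the lookup introduced in cli/handler.go, with the Flag interface collapsed to a plain name:

```go
package main

import "fmt"

// flag stands in for cli.Flag; only the name matters for the lookup.
type flag struct{ name string }

type flagGroup struct {
	name  string
	flags []flag
}

type flagGroups []flagGroup

// getFlags scans the groups in declaration order and returns the
// flags registered under the given name, or nil when absent.
func (groups flagGroups) getFlags(name string) []flag {
	for _, group := range groups {
		if group.name == name {
			return group.flags
		}
	}
	return nil
}

func main() {
	groups := flagGroups{
		{name: "global", flags: []flag{{"config"}}},
		{name: "options", flags: []flag{{"recursive"}}},
	}
	fmt.Println(groups.getFlags("options")) // [{recursive}]
	fmt.Println(groups.getFlags("missing")) // []
}
```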
}, &cli.Handler{ Pattern: "[global] info [options] ", Description: "Show file info", Callback: infoHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.BoolFlag{ Name: "sizeInBytes", Patterns: []string{"--bytes"}, Description: "Show size in bytes", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] mkdir [options] ", Description: "Create directory", Callback: mkdirHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.StringSliceFlag{ Name: "parent", Patterns: []string{"-p", "--parent"}, @@ -278,16 +278,16 @@ func main() { Description: "Share created directory", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] share [options] ", Description: "Share file or directory", Callback: shareHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.BoolFlag{ Name: "discoverable", Patterns: []string{"--discoverable"}, @@ -317,32 +317,32 @@ func main() { Description: "Delete all sharing permissions", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] delete [options] ", Description: "Delete file or directory", Callback: deleteHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.BoolFlag{ Name: "recursive", Patterns: []string{"-r", "--recursive"}, Description: "Delete directory and all it's content", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] sync download [options] ", Description: "Sync drive directory to local directory", Callback: downloadSyncHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.BoolFlag{ Name: "noProgress", Patterns: []string{"--no-progress"}, @@ -361,16 +361,16 @@ func main() { Description: "Delete extraneous local files", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] sync upload [options] ", Description: "Sync local directory to drive", Callback: uploadSyncHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.BoolFlag{ Name: "dryRun", Patterns: []string{"--dry-run"}, @@ -395,16 +395,16 @@ func main() { Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), DefaultValue: DefaultUploadChunkSize, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] changes [options]", Description: "List file changes", Callback: listChangesHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.IntFlag{ Name: "maxChanges", Patterns: []string{"-m", "--max"}, @@ -435,16 +435,16 @@ func main() { Description: "Dont print the header", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] revision list [options] ", Description: "List file revisions", Callback: listRevisionsHandler, - Flags: cli.Flags{ - "global": globalFlags, - 
"options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.IntFlag{ Name: "nameWidth", Patterns: []string{"--name-width"}, @@ -463,16 +463,16 @@ func main() { Description: "Size in bytes", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] revision download [options] ", Description: "Download revision", Callback: downloadRevisionHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.BoolFlag{ Name: "force", Patterns: []string{"-f", "--force"}, @@ -491,24 +491,24 @@ func main() { Description: "Write file content to stdout", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] revision delete ", Description: "Delete file revision", Callback: deleteRevisionHandler, - Flags: cli.Flags{ - "global": globalFlags, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), }, }, &cli.Handler{ Pattern: "[global] import [options] ", Description: "Upload and convert file to a google document, see 'about import' for available conversions", Callback: importHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.StringSliceFlag{ Name: "parent", Patterns: []string{"-p", "--parent"}, @@ -526,16 +526,16 @@ func main() { Description: "Share file", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] export [options] ", Description: "Export a google document", Callback: exportHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.BoolFlag{ Name: "force", Patterns: []string{"-f", "--force"}, @@ -553,39 +553,39 @@ func main() { Description: "Print available mime types for given file", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] about [options]", Description: "Google drive metadata, quota usage", Callback: aboutHandler, - Flags: cli.Flags{ - "global": globalFlags, - "options": []cli.Flag{ + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", cli.BoolFlag{ Name: "sizeInBytes", Patterns: []string{"--bytes"}, Description: "Show size in bytes", OmitValue: true, }, - }, + ), }, }, &cli.Handler{ Pattern: "[global] about import", Description: "Show supported import formats", Callback: aboutImportHandler, - Flags: cli.Flags{ - "global": globalFlags, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), }, }, &cli.Handler{ Pattern: "[global] about export", Description: "Show supported export formats", Callback: aboutExportHandler, - Flags: cli.Flags{ - "global": globalFlags, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), }, }, &cli.Handler{ diff --git a/handlers_meta.go b/handlers_meta.go index 637036c7..67d80ca9 100644 --- a/handlers_meta.go +++ b/handlers_meta.go @@ -36,9 +36,9 @@ func printCommandPrefixHelp(ctx cli.Context, prefix ...string) { } fmt.Printf("%s %s (%s)\n", Name, handler.Pattern, handler.Description) - for name, flags := range handler.Flags { - fmt.Printf("\n%s:\n", name) - for _, flag := range flags { + for _, group := range handler.FlagGroups { + fmt.Printf("\n%s:\n", group.Name) + for _, flag := range group.Flags { boolFlag, isBool := 
flag.(cli.BoolFlag) if isBool && boolFlag.OmitValue { fmt.Printf(" %s (%s)\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription()) From 0ede2d49e12ddc3a6c2f270f2a74fb938c821e12 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 23:30:39 +0100 Subject: [PATCH 093/195] optional -> flagGroup --- cli/handler.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cli/handler.go b/cli/handler.go index b196f535..a1a72576 100644 --- a/cli/handler.go +++ b/cli/handler.go @@ -42,10 +42,10 @@ func (self *Handler) getParser() Parser { var parsers []Parser for _, pattern := range self.SplitPattern() { - if isOptional(pattern) { - name := optionalName(pattern) - parser := getFlagParser(self.FlagGroups.getFlags(name)) - parsers = append(parsers, parser) + if isFlagGroup(pattern) { + groupName := flagGroupName(pattern) + flags := self.FlagGroups.getFlags(groupName) + parsers = append(parsers, getFlagParser(flags)) } else if isCaptureGroup(pattern) { parsers = append(parsers, CaptureGroupParser{pattern}) } else { @@ -110,10 +110,10 @@ func isCaptureGroup(arg string) bool { return strings.HasPrefix(arg, "<") && strings.HasSuffix(arg, ">") } -func isOptional(arg string) bool { +func isFlagGroup(arg string) bool { return strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") } -func optionalName(s string) string { +func flagGroupName(s string) string { return s[1:len(s) - 1] } From 1226727a2edb372fb4daf449e62dcd7fb72b7370 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 6 Feb 2016 23:59:15 +0100 Subject: [PATCH 094/195] Add sync list command --- drive/sync_list.go | 44 ++++++++++++++++++++++++++++++++++++++++++++ gdrive.go | 16 ++++++++++++++++ handlers_drive.go | 9 +++++++++ 3 files changed, 69 insertions(+) create mode 100644 drive/sync_list.go diff --git a/drive/sync_list.go b/drive/sync_list.go new file mode 100644 index 00000000..beb644e3 --- /dev/null +++ b/drive/sync_list.go @@ -0,0 +1,44 @@ +package drive + +import ( + "fmt" + "io" + "text/tabwriter" + "google.golang.org/api/googleapi" + "google.golang.org/api/drive/v3" +) + +type ListSyncArgs struct { + Out io.Writer + SkipHeader bool +} + +func (self *Drive) ListSync(args ListSyncArgs) error { + query := fmt.Sprintf("appProperties has {key='isSyncRoot' and value='true'}") + fields := []googleapi.Field{"nextPageToken", "files(id,name,mimeType,createdTime)"} + files, err := self.listAllFiles(query, fields) + if err != nil { + return err + } + printSyncDirectories(files, args) + return nil +} + +func printSyncDirectories(files []*drive.File, args ListSyncArgs) { + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tCreated") + } + + for _, f := range files { + fmt.Fprintf(w, "%s\t%s\t%s\n", + f.Id, + f.Name, + formatDatetime(f.CreatedTime), + ) + } + + w.Flush() +} diff --git a/gdrive.go b/gdrive.go index 41e6a54a..d906ada1 100644 --- a/gdrive.go +++ b/gdrive.go @@ -336,6 +336,22 @@ func main() { ), }, }, + &cli.Handler{ + Pattern: "[global] sync list [options]", + Description: "List all syncable directories on drive", + Callback: listSyncHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--no-header"}, + Description: "Dont print the header", + OmitValue: true, + }, + ), + }, + }, &cli.Handler{ Pattern: "[global] sync download [options] ", Description: "Sync drive directory to local directory", 
diff --git a/handlers_drive.go b/handlers_drive.go index 714493c8..71f5201b 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -230,6 +230,15 @@ func deleteHandler(ctx cli.Context) { checkErr(err) } +func listSyncHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).ListSync(drive.ListSyncArgs{ + Out: os.Stdout, + SkipHeader: args.Bool("skipHeader"), + }) + checkErr(err) +} + func deleteRevisionHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{ From 68dccc08494aff71945735c8cf4e4158fc939254 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 7 Feb 2016 12:55:10 +0100 Subject: [PATCH 095/195] Use parent pointer tree --- drive/sync.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/drive/sync.go b/drive/sync.go index cfd5acb7..3117e84e 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -148,7 +148,7 @@ func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*RemoteFile, error } func prepareRemoteRelPaths(root *drive.File, files []*drive.File) (map[string]string, error) { - // The graph will only hold integer values so we use + // The tree only holds integer values so we use // maps to lookup file by index and index by file id indexLookup := map[string]graph.NI{} fileLookup := map[graph.NI]*drive.File{} @@ -162,12 +162,13 @@ func prepareRemoteRelPaths(root *drive.File, files []*drive.File) (map[string]st fileLookup[graph.NI(i)] = f } - // Graph will hold relationship between parent and file - g := &graph.AdjacencyList{} + // This will hold 'parent index' -> 'file index' relationships + pathEnds := make([]graph.PathEnd, len(allFiles)) - // Add relationship between parent and file for all files to graph + // Prepare parent -> file relationships for i, f := range allFiles { if f == root { + pathEnds[i] = graph.PathEnd{From: -1} continue } @@ -176,10 +177,14 @@ func prepareRemoteRelPaths(root *drive.File, files []*drive.File) (map[string]st if !found { return nil, fmt.Errorf("Could not find parent of %s (%s)", f.Id, f.Name) } - - g.AddEdge(graph.NI(parentIdx), graph.NI(i)) + pathEnds[i] = graph.PathEnd{From: parentIdx} } + // Create parent pointer tree and calculate path lengths + tree := &graph.FromList{Paths: pathEnds} + tree.RecalcLeaves() + tree.RecalcLen() + // This will hold a map of file id => relative path paths := map[string]string{} @@ -190,7 +195,7 @@ func prepareRemoteRelPaths(root *drive.File, files []*drive.File) (map[string]st } // Find nodes between root and file - nodes := g.BreadthFirstPath(0, indexLookup[f.Id]) + nodes := tree.PathTo(indexLookup[f.Id], nil) // This will hold the name of all paths between root and // file (exluding root and including file itself) From c45e61e6406c188bcad432233a6eb25b4307a536 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 7 Feb 2016 14:34:48 +0100 Subject: [PATCH 096/195] Add sync list recursive command --- drive/sync.go | 38 +++++++++++++++++++++++------ drive/sync_list.go | 59 +++++++++++++++++++++++++++++++++++++++++++--- gdrive.go | 34 ++++++++++++++++++++++++++ handlers_drive.go | 13 ++++++++++ 4 files changed, 134 insertions(+), 10 deletions(-) diff --git a/drive/sync.go b/drive/sync.go index 3117e84e..c0d5addc 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -4,6 +4,7 @@ import ( "time" "fmt" "os" + "strings" "path/filepath" "github.com/soniakeys/graph" "github.com/sabhiram/go-git-ignore" @@ -24,7 +25,7 @@ func (self *Drive) prepareSyncFiles(localPath string, root *drive.File, cmp File }() go 
func() { - files, err := self.prepareRemoteFiles(root) + files, err := self.prepareRemoteFiles(root, "") remoteCh <- struct{files []*RemoteFile; err error}{files, err} }() @@ -103,10 +104,16 @@ func prepareLocalFiles(root string) ([]*LocalFile, error) { return files, err } -func (self *Drive) listAllFiles(q string, fields []googleapi.Field) ([]*drive.File, error) { +type listAllFilesArgs struct { + query string + fields []googleapi.Field + sortOrder string +} + +func (self *Drive) listAllFiles(args listAllFilesArgs) ([]*drive.File, error) { var files []*drive.File - err := self.service.Files.List().Q(q).Fields(fields...).PageSize(1000).Pages(context.TODO(), func(fl *drive.FileList) error { + err := self.service.Files.List().Q(args.query).Fields(args.fields...).OrderBy(args.sortOrder).PageSize(1000).Pages(context.TODO(), func(fl *drive.FileList) error { files = append(files, fl.Files...) return nil }) @@ -114,11 +121,14 @@ func (self *Drive) listAllFiles(q string, fields []googleapi.Field) ([]*drive.Fi return files, err } -func (self *Drive) prepareRemoteFiles(rootDir *drive.File) ([]*RemoteFile, error) { +func (self *Drive) prepareRemoteFiles(rootDir *drive.File, sortOrder string) ([]*RemoteFile, error) { // Find all files which has rootDir as root - query := fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id) - fields := []googleapi.Field{"nextPageToken", "files(id,name,parents,md5Checksum,mimeType)"} - files, err := self.listAllFiles(query, fields) + listArgs := listAllFilesArgs{ + query: fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id), + fields: []googleapi.Field{"nextPageToken", "files(id,name,parents,md5Checksum,mimeType,size,modifiedTime)"}, + sortOrder: sortOrder, + } + files, err := self.listAllFiles(listArgs) if err != nil { return nil, fmt.Errorf("Failed listing files: %s", err) } @@ -477,6 +487,20 @@ func (self byRemotePathLength) Less(i, j int) bool { return pathLength(self[i].relPath) < pathLength(self[j].relPath) } +type byRemotePath []*RemoteFile + +func (self byRemotePath) Len() int { + return len(self) +} + +func (self byRemotePath) Swap(i, j int) { + self[i], self[j] = self[j], self[i] +} + +func (self byRemotePath) Less(i, j int) bool { + return strings.ToLower(self[i].relPath) < strings.ToLower(self[j].relPath) +} + type ignoreFunc func(string) bool func prepareIgnorer(path string) (ignoreFunc, error) { diff --git a/drive/sync_list.go b/drive/sync_list.go index beb644e3..6ded6062 100644 --- a/drive/sync_list.go +++ b/drive/sync_list.go @@ -2,6 +2,7 @@ package drive import ( "fmt" + "sort" "io" "text/tabwriter" "google.golang.org/api/googleapi" @@ -14,9 +15,11 @@ type ListSyncArgs struct { } func (self *Drive) ListSync(args ListSyncArgs) error { - query := fmt.Sprintf("appProperties has {key='isSyncRoot' and value='true'}") - fields := []googleapi.Field{"nextPageToken", "files(id,name,mimeType,createdTime)"} - files, err := self.listAllFiles(query, fields) + listArgs := listAllFilesArgs{ + query: "appProperties has {key='isSyncRoot' and value='true'}", + fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,createdTime)"}, + } + files, err := self.listAllFiles(listArgs) if err != nil { return err } @@ -24,6 +27,30 @@ func (self *Drive) ListSync(args ListSyncArgs) error { return nil } +type ListRecursiveSyncArgs struct { + Out io.Writer + RootId string + SkipHeader bool + PathWidth int64 + SizeInBytes bool + SortOrder string +} + +func (self *Drive) ListRecursiveSync(args ListRecursiveSyncArgs) error { 
+ rootDir, err := self.getSyncRoot(args.RootId) + if err != nil { + return err + } + + files, err := self.prepareRemoteFiles(rootDir, args.SortOrder) + if err != nil { + return err + } + + printSyncDirContent(files, args) + return nil +} + func printSyncDirectories(files []*drive.File, args ListSyncArgs) { w := new(tabwriter.Writer) w.Init(args.Out, 0, 0, 3, ' ', 0) @@ -42,3 +69,29 @@ func printSyncDirectories(files []*drive.File, args ListSyncArgs) { w.Flush() } + +func printSyncDirContent(files []*RemoteFile, args ListRecursiveSyncArgs) { + if args.SortOrder == "" { + // Sort files by path + sort.Sort(byRemotePath(files)) + } + + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tPath\tType\tSize\tModified") + } + + for _, rf := range files { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + rf.file.Id, + truncateString(rf.relPath, int(args.PathWidth)), + filetype(rf.file), + formatSize(rf.file.Size, args.SizeInBytes), + formatDatetime(rf.file.ModifiedTime), + ) + } + + w.Flush() +} diff --git a/gdrive.go b/gdrive.go index d906ada1..a4d95bcf 100644 --- a/gdrive.go +++ b/gdrive.go @@ -12,6 +12,7 @@ const Version = "2.0.0" const DefaultMaxFiles = 30 const DefaultMaxChanges = 100 const DefaultNameWidth = 40 +const DefaultPathWidth = 60 const DefaultUploadChunkSize = 8 * 1024 * 1024 const DefaultQuery = "trashed = false and 'me' in owners" const DefaultShareRole = "reader" @@ -352,6 +353,39 @@ func main() { ), }, }, + &cli.Handler{ + Pattern: "[global] sync list recursive [options] ", + Description: "List content of syncable directory", + Callback: listRecursiveSyncHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.StringFlag{ + Name: "sortOrder", + Patterns: []string{"--order"}, + Description: "Sort order. 
See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy", + }, + cli.IntFlag{ + Name: "pathWidth", + Patterns: []string{"--path-width"}, + Description: fmt.Sprintf("Width of path column, default: %d, minimum: 9, use 0 for full width", DefaultPathWidth), + DefaultValue: DefaultPathWidth, + }, + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--no-header"}, + Description: "Dont print the header", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Size in bytes", + OmitValue: true, + }, + ), + }, + }, &cli.Handler{ Pattern: "[global] sync download [options] ", Description: "Sync drive directory to local directory", diff --git a/handlers_drive.go b/handlers_drive.go index 71f5201b..a6f288a8 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -239,6 +239,19 @@ func listSyncHandler(ctx cli.Context) { checkErr(err) } +func listRecursiveSyncHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).ListRecursiveSync(drive.ListRecursiveSyncArgs{ + Out: os.Stdout, + RootId: args.String("id"), + SkipHeader: args.Bool("skipHeader"), + PathWidth: args.Int64("pathWidth"), + SizeInBytes: args.Bool("sizeInBytes"), + SortOrder: args.String("sortOrder"), + }) + checkErr(err) +} + func deleteRevisionHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{ From 6199148887e51431f3479cce4f7b6d713cd6ba7f Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 7 Feb 2016 22:01:57 +0100 Subject: [PATCH 097/195] Retry create directory on backend error --- drive/sync_upload.go | 72 ++++++++++++++++++++++++++++++-------------- 1 file changed, 49 insertions(+), 23 deletions(-) diff --git a/drive/sync_upload.go b/drive/sync_upload.go index c4df1e85..f4efcb0d 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -134,36 +134,36 @@ func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs return nil, fmt.Errorf("Could not find remote directory with path '%s'", parentPath) } - dstFile := &drive.File{ - Name: lf.info.Name(), - MimeType: DirectoryMimeType, - Parents: []string{parent.file.Id}, - AppProperties: map[string]string{"syncRootId": args.RootId}, + fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i + 1, missingCount, filepath.Join(files.root.file.Name, lf.relPath)) + + f, err := self.createMissingRemoteDir(createMissingRemoteDirArgs{ + name: lf.info.Name(), + parentId: parent.file.Id, + rootId: args.RootId, + dryRun: args.DryRun, + try: 0, + }) + if err != nil { + return nil, err } - fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory: %s\n", i + 1, missingCount, filepath.Join(files.root.file.Name, lf.relPath)) - - if args.DryRun { - files.remote = append(files.remote, &RemoteFile{ - relPath: lf.relPath, - file: dstFile, - }) - } else { - f, err := self.service.Files.Create(dstFile).Do() - if err != nil { - return nil, fmt.Errorf("Failed to create directory: %s", err) - } - - files.remote = append(files.remote, &RemoteFile{ - relPath: lf.relPath, - file: f, - }) - } + files.remote = append(files.remote, &RemoteFile{ + relPath: lf.relPath, + file: f, + }) } return files, nil } +type createMissingRemoteDirArgs struct { + name string + parentId string + rootId string + dryRun bool + try int +} + func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) error { missingFiles := files.filterMissingRemoteFiles() missingCount := len(missingFiles) @@ -245,6 +245,32 @@ func (self *Drive) 
deleteExtraneousRemoteFiles(files *syncFiles, args UploadSync return nil } +func (self *Drive) createMissingRemoteDir(args createMissingRemoteDirArgs) (*drive.File, error) { + dstFile := &drive.File{ + Name: args.name, + MimeType: DirectoryMimeType, + Parents: []string{args.parentId}, + AppProperties: map[string]string{"syncRootId": args.rootId}, + } + + if args.dryRun { + return dstFile, nil + } + + f, err := self.service.Files.Create(dstFile).Do() + if err != nil { + if isBackendError(err) && args.try < MaxBackendErrorRetries { + exponentialBackoffSleep(args.try) + args.try++ + self.createMissingRemoteDir(args) + } else { + return nil, fmt.Errorf("Failed to create directory: %s", err) + } + } + + return f, nil +} + func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args UploadSyncArgs, try int) error { srcFile, err := os.Open(lf.absPath) if err != nil { From b56ee90075d3b073bd87e5527528b62f85ef031e Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 7 Feb 2016 22:11:28 +0100 Subject: [PATCH 098/195] Retry update file on backend error --- drive/sync_upload.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drive/sync_upload.go b/drive/sync_upload.go index f4efcb0d..15a03275 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -209,7 +209,7 @@ func (self *Drive) updateChangedFiles(files *syncFiles, args UploadSyncArgs) err continue } - err := self.updateChangedFile(cf, args) + err := self.updateChangedFile(cf, args, 0) if err != nil { return err } @@ -307,7 +307,7 @@ func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args Upload return nil } -func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs) error { +func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs, try int) error { srcFile, err := os.Open(cf.local.absPath) if err != nil { return fmt.Errorf("Failed to open file: %s", err) @@ -327,7 +327,13 @@ func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs) error _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Media(srcReader, chunkSize).Do() if err != nil { - return fmt.Errorf("Failed to update file: %s", err) + if isBackendError(err) && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + self.updateChangedFile(cf, args, try) + } else { + return fmt.Errorf("Failed to update file: %s", err) + } } return nil From 23919aa1adfc693b45a292c449418ee78d744df0 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 7 Feb 2016 22:14:23 +0100 Subject: [PATCH 099/195] Retry delete file on backend error --- drive/sync_upload.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drive/sync_upload.go b/drive/sync_upload.go index 15a03275..f5bc48b7 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -236,7 +236,7 @@ func (self *Drive) deleteExtraneousRemoteFiles(files *syncFiles, args UploadSync continue } - err := self.deleteRemoteFile(rf, args) + err := self.deleteRemoteFile(rf, args, 0) if err != nil { return err } @@ -339,10 +339,16 @@ func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs, try i return nil } -func (self *Drive) deleteRemoteFile(rf *RemoteFile, args UploadSyncArgs) error { +func (self *Drive) deleteRemoteFile(rf *RemoteFile, args UploadSyncArgs, try int) error { err := self.service.Files.Delete(rf.file.Id).Do() if err != nil { - return fmt.Errorf("Failed to delete file: %s", err) + if isBackendError(err) && try < MaxBackendErrorRetries { + 
exponentialBackoffSleep(try) + try++ + self.deleteRemoteFile(rf, args, try) + } else { + return fmt.Errorf("Failed to delete file: %s", err) + } } return nil From b915aed4e6b8a4f2ef4a000af2d9af7978b1d991 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 7 Feb 2016 22:44:57 +0100 Subject: [PATCH 100/195] Retry download file on backend error --- drive/sync_download.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/drive/sync_download.go b/drive/sync_download.go index f18cb18a..10a8d186 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -137,7 +137,7 @@ func (self *Drive) downloadMissingFiles(files *syncFiles, args DownloadSyncArgs) continue } - err = self.downloadRemoteFile(rf.file.Id, localPath, args) + err = self.downloadRemoteFile(rf.file.Id, localPath, args, 0) if err != nil { return err } @@ -166,7 +166,7 @@ func (self *Drive) downloadChangedFiles(files *syncFiles, args DownloadSyncArgs) continue } - err = self.downloadRemoteFile(cf.remote.file.Id, localPath, args) + err = self.downloadRemoteFile(cf.remote.file.Id, localPath, args, 0) if err != nil { return err } @@ -175,10 +175,16 @@ func (self *Drive) downloadChangedFiles(files *syncFiles, args DownloadSyncArgs) return nil } -func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs) error { +func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, try int) error { res, err := self.service.Files.Get(id).Download() if err != nil { - return fmt.Errorf("Failed to download file: %s", err) + if isBackendError(err) && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + self.downloadRemoteFile(id, fpath, args, try) + } else { + return fmt.Errorf("Failed to download file: %s", err) + } } // Close body on function exit @@ -203,7 +209,11 @@ func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs) e // Save file to disk _, err = io.Copy(outFile, srcReader) - if err != nil { + if err != nil && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + self.downloadRemoteFile(id, fpath, args, try) + } else { return fmt.Errorf("Download was interrupted: %s", err) } From 0bbe6c4a4b35511c0c16040abd122cf746be9167 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 7 Feb 2016 23:21:32 +0100 Subject: [PATCH 101/195] Support listing more than 1000 files --- drive/list.go | 56 ++++++++++++++++++++++++++++++++++++++++++++++++--- drive/sync.go | 18 ----------------- drive/util.go | 5 +++++ 3 files changed, 58 insertions(+), 21 deletions(-) diff --git a/drive/list.go b/drive/list.go index d3d8f3d0..e6365852 100644 --- a/drive/list.go +++ b/drive/list.go @@ -4,7 +4,9 @@ import ( "fmt" "io" "text/tabwriter" + "golang.org/x/net/context" "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" ) type ListFilesArgs struct { @@ -18,14 +20,20 @@ type ListFilesArgs struct { } func (self *Drive) List(args ListFilesArgs) (err error) { - fileList, err := self.service.Files.List().PageSize(args.MaxFiles).Q(args.Query).OrderBy(args.SortOrder).Fields("files(id,name,md5Checksum,mimeType,size,createdTime)").Do() + listArgs := listAllFilesArgs{ + query: args.Query, + fields: []googleapi.Field{"nextPageToken", "files(id,name,md5Checksum,mimeType,size,createdTime)"}, + sortOrder: args.SortOrder, + maxFiles: args.MaxFiles, + } + files, err := self.listAllFiles(listArgs) if err != nil { - return fmt.Errorf("Failed listing files: %s", err) + return fmt.Errorf("Failed to list files: %s", err) } 
PrintFileList(PrintFileListArgs{ Out: args.Out, - Files: fileList.Files, + Files: files, NameWidth: int(args.NameWidth), SkipHeader: args.SkipHeader, SizeInBytes: args.SizeInBytes, @@ -34,6 +42,48 @@ func (self *Drive) List(args ListFilesArgs) (err error) { return } +type listAllFilesArgs struct { + query string + fields []googleapi.Field + sortOrder string + maxFiles int64 +} + +func (self *Drive) listAllFiles(args listAllFilesArgs) ([]*drive.File, error) { + var files []*drive.File + + var pageSize int64 + if args.maxFiles > 0 && args.maxFiles < 1000 { + pageSize = args.maxFiles + } else { + pageSize = 1000 + } + + controlledStop := fmt.Errorf("Controlled stop") + + err := self.service.Files.List().Q(args.query).Fields(args.fields...).OrderBy(args.sortOrder).PageSize(pageSize).Pages(context.TODO(), func(fl *drive.FileList) error { + files = append(files, fl.Files...) + + // Stop when we have all the files we need + if args.maxFiles > 0 && len(files) >= int(args.maxFiles) { + return controlledStop + } + + return nil + }) + + if err != nil && err != controlledStop { + return nil, err + } + + if args.maxFiles > 0 { + n := min(len(files), int(args.maxFiles)) + return files[:n], nil + } + + return files, nil +} + type PrintFileListArgs struct { Out io.Writer Files []*drive.File diff --git a/drive/sync.go b/drive/sync.go index c0d5addc..86ddb3bc 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -8,7 +8,6 @@ import ( "path/filepath" "github.com/soniakeys/graph" "github.com/sabhiram/go-git-ignore" - "golang.org/x/net/context" "google.golang.org/api/drive/v3" "google.golang.org/api/googleapi" ) @@ -104,23 +103,6 @@ func prepareLocalFiles(root string) ([]*LocalFile, error) { return files, err } -type listAllFilesArgs struct { - query string - fields []googleapi.Field - sortOrder string -} - -func (self *Drive) listAllFiles(args listAllFilesArgs) ([]*drive.File, error) { - var files []*drive.File - - err := self.service.Files.List().Q(args.query).Fields(args.fields...).OrderBy(args.sortOrder).PageSize(1000).Pages(context.TODO(), func(fl *drive.FileList) error { - files = append(files, fl.Files...) 
- return nil - }) - - return files, err -} - func (self *Drive) prepareRemoteFiles(rootDir *drive.File, sortOrder string) ([]*RemoteFile, error) { // Find all files which has rootDir as root listArgs := listAllFilesArgs{ diff --git a/drive/util.go b/drive/util.go index 481a48e8..7a7845a3 100644 --- a/drive/util.go +++ b/drive/util.go @@ -148,3 +148,8 @@ func pow(x int, y int) int { f := math.Pow(float64(x), float64(y)) return int(f) } + +func min(x int, y int) int { + n := math.Min(float64(x), float64(y)) + return int(n) +} From 9e2fbc9471996cdab057c2d3807a847424653036 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 8 Feb 2016 21:50:21 +0100 Subject: [PATCH 102/195] Use listAllFiles --- drive/download.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drive/download.go b/drive/download.go index 468f96f3..8477fd71 100644 --- a/drive/download.go +++ b/drive/download.go @@ -7,6 +7,7 @@ import ( "time" "path/filepath" "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" ) type DownloadArgs struct { @@ -102,15 +103,18 @@ func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) error { } func (self *Drive) downloadDirectory(parent *drive.File, args DownloadArgs) error { - query := fmt.Sprintf("'%s' in parents", parent.Id) - fileList, err := self.service.Files.List().Q(query).Fields("files(id,name)").Do() + listArgs := listAllFilesArgs{ + query: fmt.Sprintf("'%s' in parents", parent.Id), + fields: []googleapi.Field{"nextPageToken", "files(id,name)"}, + } + files, err := self.listAllFiles(listArgs) if err != nil { return fmt.Errorf("Failed listing files: %s", err) } newPath := filepath.Join(args.Path, parent.Name) - for _, f := range fileList.Files { + for _, f := range files { // Copy args and update changed fields newArgs := args newArgs.Path = newPath From 48221145e64d65fb7afc1dba40e6f41bb7b76e7d Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 8 Feb 2016 22:51:01 +0100 Subject: [PATCH 103/195] Fix error check --- drive/sync_download.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drive/sync_download.go b/drive/sync_download.go index 10a8d186..20ff87d2 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -209,12 +209,14 @@ func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, t // Save file to disk _, err = io.Copy(outFile, srcReader) - if err != nil && try < MaxBackendErrorRetries { - exponentialBackoffSleep(try) - try++ - self.downloadRemoteFile(id, fpath, args, try) - } else { - return fmt.Errorf("Download was interrupted: %s", err) + if err != nil { + if try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + self.downloadRemoteFile(id, fpath, args, try) + } else { + return fmt.Errorf("Download was interrupted: %s", err) + } } return nil From db952bdd07a69023f6fcc18f4de4ed0bfba5d319 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 8 Feb 2016 23:05:11 +0100 Subject: [PATCH 104/195] Print relative path, etc --- drive/sync_download.go | 20 +++++++++----------- drive/sync_upload.go | 4 ++-- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/drive/sync_download.go b/drive/sync_download.go index 20ff87d2..d918858b 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -101,17 +101,17 @@ func (self *Drive) createMissingLocalDirs(files *syncFiles, args DownloadSyncArg sort.Sort(byRemotePathLength(missingDirs)) for i, rf := range missingDirs { - path, err := filepath.Abs(filepath.Join(args.Path, 
rf.relPath)) + absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) if err != nil { return fmt.Errorf("Failed to determine local absolute path: %s", err) } - fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory: %s\n", i + 1, missingCount, path) + fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i + 1, missingCount, filepath.Join(filepath.Base(args.Path), rf.relPath)) if args.DryRun { continue } - mkdir(path) + os.MkdirAll(absPath, 0775) } return nil @@ -126,18 +126,17 @@ func (self *Drive) downloadMissingFiles(files *syncFiles, args DownloadSyncArgs) } for i, rf := range missingFiles { - remotePath := filepath.Join(files.root.file.Name, rf.relPath) - localPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) + absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) if err != nil { return fmt.Errorf("Failed to determine local absolute path: %s", err) } - fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, missingCount, remotePath, localPath) + fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, missingCount, rf.relPath, filepath.Join(filepath.Base(args.Path), rf.relPath)) if args.DryRun { continue } - err = self.downloadRemoteFile(rf.file.Id, localPath, args, 0) + err = self.downloadRemoteFile(rf.file.Id, absPath, args, 0) if err != nil { return err } @@ -155,18 +154,17 @@ func (self *Drive) downloadChangedFiles(files *syncFiles, args DownloadSyncArgs) } for i, cf := range changedFiles { - remotePath := filepath.Join(files.root.file.Name, cf.remote.relPath) - localPath, err := filepath.Abs(filepath.Join(args.Path, cf.remote.relPath)) + absPath, err := filepath.Abs(filepath.Join(args.Path, cf.remote.relPath)) if err != nil { return fmt.Errorf("Failed to determine local absolute path: %s", err) } - fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, changedCount, remotePath, localPath) + fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, changedCount, cf.remote.relPath, filepath.Join(filepath.Base(args.Path), cf.remote.relPath)) if args.DryRun { continue } - err = self.downloadRemoteFile(cf.remote.file.Id, localPath, args, 0) + err = self.downloadRemoteFile(cf.remote.file.Id, absPath, args, 0) if err != nil { return err } diff --git a/drive/sync_upload.go b/drive/sync_upload.go index f5bc48b7..ca81b5c6 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -179,7 +179,7 @@ func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) err return fmt.Errorf("Could not find remote directory with path '%s'", parentPath) } - fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i + 1, missingCount, lf.absPath, filepath.Join(files.root.file.Name, lf.relPath)) + fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i + 1, missingCount, lf.relPath, filepath.Join(files.root.file.Name, lf.relPath)) if args.DryRun { continue @@ -203,7 +203,7 @@ func (self *Drive) updateChangedFiles(files *syncFiles, args UploadSyncArgs) err } for i, cf := range changedFiles { - fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i + 1, changedCount, cf.local.absPath, filepath.Join(files.root.file.Name, cf.local.relPath)) + fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i + 1, changedCount, cf.local.relPath, filepath.Join(files.root.file.Name, cf.local.relPath)) if args.DryRun { continue From ad4309f1028d165b8905a6a22be89191f787fc39 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 10 Feb 2016 20:53:37 +0100 Subject: [PATCH 105/195] Minor message change --- 
drive/sync_download.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drive/sync_download.go b/drive/sync_download.go index d918858b..f9471428 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -31,7 +31,7 @@ func (self *Drive) DownloadSync(args DownloadSyncArgs) error { return err } - fmt.Fprintln(args.Out, "Collecting local and remote file information...") + fmt.Fprintln(args.Out, "Collecting file information...") files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer) if err != nil { return err From 5eae4f159d340d257e41e75604a0fc831cb76381 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Fri, 12 Feb 2016 23:19:03 +0100 Subject: [PATCH 106/195] Add conflict handling and flags for downloads --- drive/sync.go | 88 ++++++++++++++++++++++++++++++++++++++++++ drive/sync_download.go | 75 +++++++++++++++++++++++++++++++++-- gdrive.go | 30 +++++++++++--- handlers_drive.go | 25 ++++++++++++ 4 files changed, 209 insertions(+), 9 deletions(-) diff --git a/drive/sync.go b/drive/sync.go index 86ddb3bc..f39c1c63 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -4,8 +4,10 @@ import ( "time" "fmt" "os" + "io" "strings" "path/filepath" + "text/tabwriter" "github.com/soniakeys/graph" "github.com/sabhiram/go-git-ignore" "google.golang.org/api/drive/v3" @@ -14,6 +16,31 @@ import ( const DefaultIgnoreFile = ".gdriveignore" +type ModTime int + +const ( + LocalLastModified ModTime = iota + RemoteLastModified + EqualModifiedTime +) + +type LargestSize int + +const ( + LocalLargestSize LargestSize = iota + RemoteLargestSize + EqualSize +) + +type ConflictResolution int + +const ( + NoResolution ConflictResolution = iota + KeepLocal + KeepRemote + KeepLargest +) + func (self *Drive) prepareSyncFiles(localPath string, root *drive.File, cmp FileComparer) (*syncFiles, error) { localCh := make(chan struct{files []*LocalFile; err error}) remoteCh := make(chan struct{files []*RemoteFile; err error}) @@ -281,6 +308,36 @@ func (self RemoteFile) Modified() time.Time { return t } +func (self *changedFile) compareModTime() ModTime { + localTime := self.local.Modified() + remoteTime := self.remote.Modified() + + if localTime.After(remoteTime) { + return LocalLastModified + } + + if remoteTime.After(localTime) { + return RemoteLastModified + } + + return EqualModifiedTime +} + +func (self *changedFile) compareSize() LargestSize { + localSize := self.local.Size() + remoteSize := self.remote.Size() + + if localSize > remoteSize { + return LocalLargestSize + } + + if remoteSize > localSize { + return RemoteLargestSize + } + + return EqualSize +} + func (self *syncFiles) filterMissingRemoteDirs() []*LocalFile { var files []*LocalFile @@ -441,6 +498,18 @@ func (self *syncFiles) findLocalByPath(relPath string) (*LocalFile, bool) { return nil, false } +func findLocalConflicts(files []*changedFile) []*changedFile { + var conflicts []*changedFile + + for _, cf := range files { + if cf.compareModTime() == LocalLastModified { + conflicts = append(conflicts, cf) + } + } + + return conflicts +} + type byLocalPathLength []*LocalFile func (self byLocalPathLength) Len() int { @@ -501,3 +570,22 @@ func prepareIgnorer(path string) (ignoreFunc, error) { return ignorer.MatchesPath, nil } + +func formatConflicts(conflicts []*changedFile, out io.Writer) { + w := new(tabwriter.Writer) + w.Init(out, 0, 0, 3, ' ', 0) + + fmt.Fprintln(w, "Path\tSize Local\tSize Remote\tModified Local\tModified Remote") + + for _, cf := range conflicts { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + 
truncateString(cf.local.relPath, 60), + formatSize(cf.local.Size(), false), + formatSize(cf.remote.Size(), false), + cf.local.Modified().Local().Format("Jan _2 2006 15:04:05.000"), + cf.remote.Modified().Local().Format("Jan _2 2006 15:04:05.000"), + ) + } + + w.Flush() +} diff --git a/drive/sync_download.go b/drive/sync_download.go index f9471428..9dcd7191 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -6,6 +6,7 @@ import ( "os" "sort" "time" + "bytes" "path/filepath" "google.golang.org/api/googleapi" "google.golang.org/api/drive/v3" @@ -18,6 +19,7 @@ type DownloadSyncArgs struct { Path string DryRun bool DeleteExtraneous bool + Resolution ConflictResolution Comparer FileComparer } @@ -37,8 +39,19 @@ func (self *Drive) DownloadSync(args DownloadSyncArgs) error { return err } + // Find changed files + changedFiles := files.filterChangedRemoteFiles() + fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) + // Ensure that that we don't overwrite any local changes + if args.Resolution == NoResolution { + err = ensureNoLocalModifications(changedFiles) + if err != nil { + return fmt.Errorf("Conflict detected!\nThe following files have changed and the local file are newer than it's remote counterpart:\n\n%s\nNo conflict resolution was given, aborting...", err) + } + } + // Create missing directories err = self.createMissingLocalDirs(files, args) if err != nil { @@ -52,7 +65,7 @@ func (self *Drive) DownloadSync(args DownloadSyncArgs) error { } // Download files that has changed - err = self.downloadChangedFiles(files, args) + err = self.downloadChangedFiles(changedFiles, args) if err != nil { return err } @@ -145,8 +158,7 @@ func (self *Drive) downloadMissingFiles(files *syncFiles, args DownloadSyncArgs) return nil } -func (self *Drive) downloadChangedFiles(files *syncFiles, args DownloadSyncArgs) error { - changedFiles := files.filterChangedRemoteFiles() +func (self *Drive) downloadChangedFiles(changedFiles []*changedFile, args DownloadSyncArgs) error { changedCount := len(changedFiles) if changedCount > 0 { @@ -154,6 +166,11 @@ func (self *Drive) downloadChangedFiles(files *syncFiles, args DownloadSyncArgs) } for i, cf := range changedFiles { + if skip, reason := checkLocalConflict(cf, args.Resolution); skip { + fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i + 1, changedCount, cf.remote.relPath, reason) + continue + } + absPath, err := filepath.Abs(filepath.Join(args.Path, cf.remote.relPath)) if err != nil { return fmt.Errorf("Failed to determine local absolute path: %s", err) @@ -246,3 +263,55 @@ func (self *Drive) deleteExtraneousLocalFiles(files *syncFiles, args DownloadSyn return nil } + +func checkLocalConflict(cf *changedFile, resolution ConflictResolution) (bool, string) { + // No conflict unless local file was last modified + if cf.compareModTime() != LocalLastModified { + return false, "" + } + + // Don't skip if want to keep the remote file + if resolution == KeepRemote { + return false, "" + } + + // Skip if we want to keep the local file + if resolution == KeepLocal { + return true, "conflicting file, keeping local file" + } + + if resolution == KeepLargest { + largest := cf.compareSize() + + // Skip if the local file is largest + if largest == LocalLargestSize { + return true, "conflicting file, local file is largest, keeping local" + } + + // Don't skip if the remote file is largest + if largest == RemoteLargestSize { + return false, "" + } + + // Keep local if both files have the same size + if 
largest == EqualSize { + return true, "conflicting file, file sizes are equal, keeping local" + } + } + + // The conditionals above should cover all cases, + // unless the programmer did something wrong, + // in which case we default to being non-destructive and skip the file + return true, "conflicting file, unhandled case" +} + +func ensureNoLocalModifications(files []*changedFile) error { + conflicts := findLocalConflicts(files) + if len(conflicts) == 0 { + return nil + } + + buffer := bytes.NewBufferString("") + formatConflicts(conflicts, buffer) + return fmt.Errorf(buffer.String()) +} diff --git a/gdrive.go b/gdrive.go index a4d95bcf..43f992d8 100644 --- a/gdrive.go +++ b/gdrive.go @@ -394,15 +394,21 @@ func main() { cli.NewFlagGroup("global", globalFlags...), cli.NewFlagGroup("options", cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", + Name: "keepRemote", + Patterns: []string{"--keep-remote"}, + Description: "Keep remote file when a conflict is encountered", OmitValue: true, }, cli.BoolFlag{ - Name: "dryRun", - Patterns: []string{"--dry-run"}, - Description: "Show what would have been transferred", + Name: "keepLocal", + Patterns: []string{"--keep-local"}, + Description: "Keep local file when a conflict is encountered", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "keepLargest", + Patterns: []string{"--keep-largest"}, + Description: "Keep largest file when a conflict is encountered", OmitValue: true, }, cli.BoolFlag{ @@ -411,6 +417,18 @@ func main() { Description: "Delete extraneous local files", OmitValue: true, }, + cli.BoolFlag{ + Name: "dryRun", + Patterns: []string{"--dry-run"}, + Description: "Show what would have been transferred", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, ), }, }, diff --git a/handlers_drive.go b/handlers_drive.go index a6f288a8..aaf957c3 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -68,6 +68,7 @@ func downloadSyncHandler(ctx cli.Context) { RootId: args.String("id"), DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), + Resolution: conflictResolution(args), Comparer: NewCachedMd5Comparer(cachePath), }) checkErr(err) @@ -324,3 +325,27 @@ func progressWriter(discard bool) io.Writer { } return os.Stderr } + +func conflictResolution(args cli.Arguments) drive.ConflictResolution { + keepLocal := args.Bool("keepLocal") + keepRemote := args.Bool("keepRemote") + keepLargest := args.Bool("keepLargest") + + if (keepLocal && keepRemote) || (keepLocal && keepLargest) || (keepRemote && keepLargest) { + ExitF("Only one conflict resolution flag can be given") + } + + if keepLocal { + return drive.KeepLocal + } + + if keepRemote { + return drive.KeepRemote + } + + if keepLargest { + return drive.KeepLargest + } + + return drive.NoResolution +} From 12e431b5e17ff4ceb6680987bd4649a94b83f896 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 13 Feb 2016 01:09:47 +0100 Subject: [PATCH 107/195] Add conflict handling and flags for uploads --- drive/sync.go | 12 +++++++ drive/sync_upload.go | 77 +++++++++++++++++++++++++++++++++++++++++--- gdrive.go | 30 +++++++++++++---- handlers_drive.go | 1 + 4 files changed, 110 insertions(+), 10 deletions(-) diff --git a/drive/sync.go b/drive/sync.go index f39c1c63..204e7c23 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -510,6 +510,18 @@ func findLocalConflicts(files []*changedFile) []*changedFile { return conflicts } 
+func findRemoteConflicts(files []*changedFile) []*changedFile { + var conflicts []*changedFile + + for _, cf := range files { + if cf.compareModTime() == RemoteLastModified { + conflicts = append(conflicts, cf) + } + } + + return conflicts +} + type byLocalPathLength []*LocalFile func (self byLocalPathLength) Len() int { diff --git a/drive/sync_upload.go b/drive/sync_upload.go index ca81b5c6..949d8a6b 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -6,6 +6,7 @@ import ( "os" "time" "sort" + "bytes" "path/filepath" "google.golang.org/api/googleapi" "google.golang.org/api/drive/v3" @@ -19,6 +20,7 @@ type UploadSyncArgs struct { DryRun bool DeleteExtraneous bool ChunkSize int64 + Resolution ConflictResolution Comparer FileComparer } @@ -42,8 +44,19 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { return err } + // Find changed files + changedFiles := files.filterChangedLocalFiles() + fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) + // Ensure that that we don't overwrite any remote changes + if args.Resolution == NoResolution { + err = ensureNoRemoteModifications(changedFiles) + if err != nil { + return fmt.Errorf("Conflict detected!\nThe following files have changed and the remote file are newer than it's local counterpart:\n\n%s\nNo conflict resolution was given, aborting...", err) + } + } + // Create missing directories files, err = self.createMissingRemoteDirs(files, args) if err != nil { @@ -57,7 +70,7 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { } // Update modified files - err = self.updateChangedFiles(files, args) + err = self.updateChangedFiles(changedFiles, rootDir, args) if err != nil { return err } @@ -194,8 +207,7 @@ func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) err return nil } -func (self *Drive) updateChangedFiles(files *syncFiles, args UploadSyncArgs) error { - changedFiles := files.filterChangedLocalFiles() +func (self *Drive) updateChangedFiles(changedFiles []*changedFile, root *drive.File, args UploadSyncArgs) error { changedCount := len(changedFiles) if changedCount > 0 { @@ -203,7 +215,12 @@ func (self *Drive) updateChangedFiles(files *syncFiles, args UploadSyncArgs) err } for i, cf := range changedFiles { - fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i + 1, changedCount, cf.local.relPath, filepath.Join(files.root.file.Name, cf.local.relPath)) + if skip, reason := checkRemoteConflict(cf, args.Resolution); skip { + fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i + 1, changedCount, cf.local.relPath, reason) + continue + } + + fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i + 1, changedCount, cf.local.relPath, filepath.Join(root.Name, cf.local.relPath)) if args.DryRun { continue @@ -363,3 +380,55 @@ func (self *Drive) dirIsEmpty(id string) (bool, error) { return len(fileList.Files) == 0, nil } + +func checkRemoteConflict(cf *changedFile, resolution ConflictResolution) (bool, string) { + // No conflict unless remote file was last modified + if cf.compareModTime() != RemoteLastModified { + return false, "" + } + + // Don't skip if want to keep the local file + if resolution == KeepLocal { + return false, "" + } + + // Skip if we want to keep the remote file + if resolution == KeepRemote { + return true, "conflicting file, keeping remote file" + } + + if resolution == KeepLargest { + largest := cf.compareSize() + + // Skip if the remote file is largest + if largest == RemoteLargestSize { + return true, 
"conflicting file, remote file is largest, keeping remote" + } + + // Don't skip if the local file is largest + if largest == LocalLargestSize { + return false, "" + } + + // Keep remote if both files have the same size + if largest == EqualSize { + return true, "conflicting file, file sizes are equal, keeping remote" + } + } + + // The conditionals above should cover all cases, + // unless the programmer did something wrong, + // in which case we default to being non-destructive and skip the file + return true, "conflicting file, unhandled case" +} + +func ensureNoRemoteModifications(files []*changedFile) error { + conflicts := findRemoteConflicts(files) + if len(conflicts) == 0 { + return nil + } + + buffer := bytes.NewBufferString("") + formatConflicts(conflicts, buffer) + return fmt.Errorf(buffer.String()) +} diff --git a/gdrive.go b/gdrive.go index 43f992d8..36b4a09c 100644 --- a/gdrive.go +++ b/gdrive.go @@ -440,15 +440,21 @@ func main() { cli.NewFlagGroup("global", globalFlags...), cli.NewFlagGroup("options", cli.BoolFlag{ - Name: "dryRun", - Patterns: []string{"--dry-run"}, - Description: "Show what would have been transferred", + Name: "keepRemote", + Patterns: []string{"--keep-remote"}, + Description: "Keep remote file when a conflict is encountered", OmitValue: true, }, cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", + Name: "keepLocal", + Patterns: []string{"--keep-local"}, + Description: "Keep local file when a conflict is encountered", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "keepLargest", + Patterns: []string{"--keep-largest"}, + Description: "Keep largest file when a conflict is encountered", OmitValue: true, }, cli.BoolFlag{ @@ -457,6 +463,18 @@ func main() { Description: "Delete extraneous remote files", OmitValue: true, }, + cli.BoolFlag{ + Name: "dryRun", + Patterns: []string{"--dry-run"}, + Description: "Show what would have been transferred", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, cli.IntFlag{ Name: "chunksize", Patterns: []string{"--chunksize"}, diff --git a/handlers_drive.go b/handlers_drive.go index aaf957c3..9f85fd6c 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -128,6 +128,7 @@ func uploadSyncHandler(ctx cli.Context) { DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), ChunkSize: args.Int64("chunksize"), + Resolution: conflictResolution(args), Comparer: NewCachedMd5Comparer(cachePath), }) checkErr(err) From 46e9f195b46fcabbf1fa40e8cf96a66c425c4ddb Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 13 Feb 2016 11:37:19 +0100 Subject: [PATCH 108/195] Download to tmp file and rename on success --- drive/sync_download.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/drive/sync_download.go b/drive/sync_download.go index 9dcd7191..ec496e79 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -196,7 +196,7 @@ func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, t if isBackendError(err) && try < MaxBackendErrorRetries { exponentialBackoffSleep(try) try++ - self.downloadRemoteFile(id, fpath, args, try) + return self.downloadRemoteFile(id, fpath, args, try) } else { return fmt.Errorf("Failed to download file: %s", err) } @@ -213,28 +213,34 @@ func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, t return err } + // Download to tmp file + 
tmpPath := fpath + ".incomplete" + // Create new file - outFile, err := os.Create(fpath) + outFile, err := os.Create(tmpPath) if err != nil { return fmt.Errorf("Unable to create local file: %s", err) } - // Close file on function exit - defer outFile.Close() - // Save file to disk _, err = io.Copy(outFile, srcReader) if err != nil { + outFile.Close() if try < MaxBackendErrorRetries { exponentialBackoffSleep(try) try++ - self.downloadRemoteFile(id, fpath, args, try) + return self.downloadRemoteFile(id, fpath, args, try) } else { + os.Remove(tmpPath) return fmt.Errorf("Download was interrupted: %s", err) } } - return nil + // Close file + outFile.Close() + + // Rename tmp file to proper filename + return os.Rename(tmpPath, fpath) } func (self *Drive) deleteExtraneousLocalFiles(files *syncFiles, args DownloadSyncArgs) error { From 60c06768d323e17ad814a7aabdd509453af2a422 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 13 Feb 2016 11:39:04 +0100 Subject: [PATCH 109/195] Return result from recursive call --- drive/sync_upload.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drive/sync_upload.go b/drive/sync_upload.go index 949d8a6b..fbfa1a65 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -279,7 +279,7 @@ func (self *Drive) createMissingRemoteDir(args createMissingRemoteDirArgs) (*dri if isBackendError(err) && args.try < MaxBackendErrorRetries { exponentialBackoffSleep(args.try) args.try++ - self.createMissingRemoteDir(args) + return self.createMissingRemoteDir(args) } else { return nil, fmt.Errorf("Failed to create directory: %s", err) } @@ -315,7 +315,7 @@ func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args Upload if isBackendError(err) && try < MaxBackendErrorRetries { exponentialBackoffSleep(try) try++ - self.uploadMissingFile(parentId, lf, args, try) + return self.uploadMissingFile(parentId, lf, args, try) } else { return fmt.Errorf("Failed to upload file: %s", err) } @@ -347,7 +347,7 @@ func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs, try i if isBackendError(err) && try < MaxBackendErrorRetries { exponentialBackoffSleep(try) try++ - self.updateChangedFile(cf, args, try) + return self.updateChangedFile(cf, args, try) } else { return fmt.Errorf("Failed to update file: %s", err) } @@ -362,7 +362,7 @@ func (self *Drive) deleteRemoteFile(rf *RemoteFile, args UploadSyncArgs, try int if isBackendError(err) && try < MaxBackendErrorRetries { exponentialBackoffSleep(try) try++ - self.deleteRemoteFile(rf, args, try) + return self.deleteRemoteFile(rf, args, try) } else { return fmt.Errorf("Failed to delete file: %s", err) } From f9d75405d586b3b4e196ad5b9f749b3c99faf55c Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 13 Feb 2016 11:46:03 +0100 Subject: [PATCH 110/195] Move dry run check out of loop --- drive/sync_download.go | 12 ++++-------- drive/sync_upload.go | 25 +++++++++++++------------ 2 files changed, 17 insertions(+), 20 deletions(-) diff --git a/drive/sync_download.go b/drive/sync_download.go index ec496e79..918c7316 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -145,10 +145,6 @@ func (self *Drive) downloadMissingFiles(files *syncFiles, args DownloadSyncArgs) } fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, missingCount, rf.relPath, filepath.Join(filepath.Base(args.Path), rf.relPath)) - if args.DryRun { - continue - } - err = self.downloadRemoteFile(rf.file.Id, absPath, args, 0) if err != nil { return err @@ -177,10 +173,6 @@ func (self 
*Drive) downloadChangedFiles(changedFiles []*changedFile, args Downlo } fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, changedCount, cf.remote.relPath, filepath.Join(filepath.Base(args.Path), cf.remote.relPath)) - if args.DryRun { - continue - } - err = self.downloadRemoteFile(cf.remote.file.Id, absPath, args, 0) if err != nil { return err @@ -191,6 +183,10 @@ func (self *Drive) downloadChangedFiles(changedFiles []*changedFile, args Downlo } func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, try int) error { + if args.DryRun { + return nil + } + res, err := self.service.Files.Get(id).Download() if err != nil { if isBackendError(err) && try < MaxBackendErrorRetries { diff --git a/drive/sync_upload.go b/drive/sync_upload.go index fbfa1a65..2c4a3636 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -194,10 +194,6 @@ func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) err fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i + 1, missingCount, lf.relPath, filepath.Join(files.root.file.Name, lf.relPath)) - if args.DryRun { - continue - } - err := self.uploadMissingFile(parent.file.Id, lf, args, 0) if err != nil { return err @@ -222,10 +218,6 @@ func (self *Drive) updateChangedFiles(changedFiles []*changedFile, root *drive.F fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i + 1, changedCount, cf.local.relPath, filepath.Join(root.Name, cf.local.relPath)) - if args.DryRun { - continue - } - err := self.updateChangedFile(cf, args, 0) if err != nil { return err @@ -249,10 +241,6 @@ func (self *Drive) deleteExtraneousRemoteFiles(files *syncFiles, args UploadSync for i, rf := range extraneousFiles { fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, filepath.Join(files.root.file.Name, rf.relPath)) - if args.DryRun { - continue - } - err := self.deleteRemoteFile(rf, args, 0) if err != nil { return err @@ -289,6 +277,10 @@ func (self *Drive) createMissingRemoteDir(args createMissingRemoteDirArgs) (*dri } func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args UploadSyncArgs, try int) error { + if args.DryRun { + return nil + } + srcFile, err := os.Open(lf.absPath) if err != nil { return fmt.Errorf("Failed to open file: %s", err) @@ -325,6 +317,10 @@ func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args Upload } func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs, try int) error { + if args.DryRun { + return nil + } + srcFile, err := os.Open(cf.local.absPath) if err != nil { return fmt.Errorf("Failed to open file: %s", err) @@ -357,6 +353,11 @@ func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs, try i } func (self *Drive) deleteRemoteFile(rf *RemoteFile, args UploadSyncArgs, try int) error { + if args.DryRun { + return nil + } + + err := self.service.Files.Delete(rf.file.Id).Do() if err != nil { if isBackendError(err) && try < MaxBackendErrorRetries { From fc24aa48336d794998bf3e45356fb34721162b8a Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 13 Feb 2016 17:18:53 +0100 Subject: [PATCH 111/195] Correct used space / free space --- drive/about.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drive/about.go b/drive/about.go index 01d70734..4c23ab88 100644 --- a/drive/about.go +++ b/drive/about.go @@ -21,8 +21,8 @@ func (self *Drive) About(args AboutArgs) (err error) { quota := about.StorageQuota fmt.Fprintf(args.Out, "User: %s, %s\n", user.DisplayName, 
user.EmailAddress) - fmt.Fprintf(args.Out, "Used: %s\n", formatSize(quota.UsageInDrive, args.SizeInBytes)) - fmt.Fprintf(args.Out, "Free: %s\n", formatSize(quota.Limit - quota.UsageInDrive, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Used: %s\n", formatSize(quota.Usage, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Free: %s\n", formatSize(quota.Limit - quota.Usage, args.SizeInBytes)) fmt.Fprintf(args.Out, "Total: %s\n", formatSize(quota.Limit, args.SizeInBytes)) fmt.Fprintf(args.Out, "Max upload size: %s\n", formatSize(about.MaxUploadSize, args.SizeInBytes)) return From 96d229360a687d9a660fe9bf7e01170fbde16798 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 13 Feb 2016 17:22:07 +0100 Subject: [PATCH 112/195] Typo --- drive/sync_download.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drive/sync_download.go b/drive/sync_download.go index 918c7316..fb7b3ae8 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -44,7 +44,7 @@ func (self *Drive) DownloadSync(args DownloadSyncArgs) error { fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) - // Ensure that that we don't overwrite any local changes + // Ensure that we don't overwrite any local changes if args.Resolution == NoResolution { err = ensureNoLocalModifications(changedFiles) if err != nil { From dd623e82015df217a84cffd6592c379aad3b4c29 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 13 Feb 2016 17:41:40 +0100 Subject: [PATCH 113/195] Ensure that there is enough free space on drive --- drive/sync_upload.go | 45 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) diff --git a/drive/sync_upload.go b/drive/sync_upload.go index 2c4a3636..a94e5070 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -44,12 +44,18 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { return err } - // Find changed files + // Find missing and changed files changedFiles := files.filterChangedLocalFiles() + missingFiles := files.filterMissingRemoteFiles() fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) - // Ensure that that we don't overwrite any remote changes + // Ensure that there is enough free space on drive + if ok, msg := self.checkRemoteFreeSpace(missingFiles, changedFiles); !ok { + return fmt.Errorf(msg) + } + + // Ensure that we don't overwrite any remote changes if args.Resolution == NoResolution { err = ensureNoRemoteModifications(changedFiles) if err != nil { @@ -64,7 +70,7 @@ func (self *Drive) UploadSync(args UploadSyncArgs) error { } // Upload missing files - err = self.uploadMissingFiles(files, args) + err = self.uploadMissingFiles(missingFiles, files, args) if err != nil { return err } @@ -177,8 +183,7 @@ type createMissingRemoteDirArgs struct { try int } -func (self *Drive) uploadMissingFiles(files *syncFiles, args UploadSyncArgs) error { - missingFiles := files.filterMissingRemoteFiles() +func (self *Drive) uploadMissingFiles(missingFiles []*LocalFile, files *syncFiles, args UploadSyncArgs) error { missingCount := len(missingFiles) if missingCount > 0 { @@ -433,3 +438,33 @@ func ensureNoRemoteModifications(files []*changedFile) error { formatConflicts(conflicts, buffer) return fmt.Errorf(buffer.String()) } + +func (self *Drive) checkRemoteFreeSpace(missingFiles []*LocalFile, changedFiles []*changedFile) (bool, string) { + about, err := self.service.About.Get().Fields("storageQuota").Do() + if err != nil { + return false, 
fmt.Sprintf("Failed to determine free space: %s", err) + } + + quota := about.StorageQuota + if quota.Limit == 0 { + return true, "" + } + + freeSpace := quota.Limit - quota.Usage + + var totalSize int64 + + for _, lf := range missingFiles { + totalSize += lf.Size() + } + + for _, cf := range changedFiles { + totalSize += cf.local.Size() + } + + if totalSize > freeSpace { + return false, fmt.Sprintf("Not enough free space, have %s need %s", formatSize(freeSpace, false), formatSize(totalSize, false)) + } + + return true, "" +} From 03384c697930351faae77ab6e750a67ce501641d Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 13 Feb 2016 18:28:33 +0100 Subject: [PATCH 114/195] Download to tmp file and rename on success --- drive/download.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drive/download.go b/drive/download.go index 8477fd71..29af98ef 100644 --- a/drive/download.go +++ b/drive/download.go @@ -73,21 +73,23 @@ func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) error { return err } + // Download to tmp file + tmpPath := filename + ".incomplete" + // Create new file - outFile, err := os.Create(filename) + outFile, err := os.Create(tmpPath) if err != nil { return fmt.Errorf("Unable to create new file: %s", err) } - // Close file on function exit - defer outFile.Close() - fmt.Fprintf(args.Out, "\nDownloading %s...\n", f.Name) started := time.Now() // Save file to disk bytes, err := io.Copy(outFile, srcReader) if err != nil { + outFile.Close() + os.Remove(tmpPath) return fmt.Errorf("Failed saving file: %s", err) } @@ -99,7 +101,12 @@ func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) error { //if deleteSourceFile { // self.Delete(args.Id) //} - return nil + + // Close File + outFile.Close() + + // Rename tmp file to proper filename + return os.Rename(tmpPath, filename) } func (self *Drive) downloadDirectory(parent *drive.File, args DownloadArgs) error { From 6d78f27d416bae8d446f495ca5414894410d5e5b Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 13 Feb 2016 18:56:31 +0100 Subject: [PATCH 115/195] Download to tmp file and rename on success --- drive/revision_download.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/drive/revision_download.go b/drive/revision_download.go index f06dac3e..be641870 100644 --- a/drive/revision_download.go +++ b/drive/revision_download.go @@ -49,25 +49,28 @@ func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { return fmt.Errorf("File '%s' already exists, use --force to overwrite", rev.OriginalFilename) } + // Download to tmp file + tmpPath := rev.OriginalFilename + ".incomplete" + // Create new file - outFile, err := os.Create(rev.OriginalFilename) + outFile, err := os.Create(tmpPath) if err != nil { return fmt.Errorf("Unable to create new file: %s", err) } - // Close file on function exit - defer outFile.Close() - // Save file to disk bytes, err := io.Copy(outFile, srcReader) if err != nil { + outFile.Close() + os.Remove(tmpPath) return fmt.Errorf("Failed saving file: %s", err) } fmt.Fprintf(args.Out, "Downloaded '%s' at %s, total %d\n", rev.OriginalFilename, "x/s", bytes) - //if deleteSourceFile { - // self.Delete(args.Id) - //} - return + // Close File + outFile.Close() + + // Rename tmp file to proper filename + return os.Rename(tmpPath, rev.OriginalFilename) } From 46f65e8a2d158a0421f2afc8f77ea441df1de35f Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 13 Feb 2016 22:34:17 +0100 Subject: 
[PATCH 116/195] Consolidate file download logic, add --path flag to revision download --- drive/download.go | 72 ++++++++++++++++++++++++++++---------- drive/revision_download.go | 51 +++++++++++---------------- gdrive.go | 5 +++ handlers_drive.go | 1 + 4 files changed, 81 insertions(+), 48 deletions(-) diff --git a/drive/download.go b/drive/download.go index 29af98ef..d392a32a 100644 --- a/drive/download.go +++ b/drive/download.go @@ -3,6 +3,7 @@ package drive import ( "fmt" "io" + "io/ioutil" "os" "time" "path/filepath" @@ -52,37 +53,74 @@ func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) error { // Close body on function exit defer res.Body.Close() - // Wrap response body in progress reader - srcReader := getProgressReader(res.Body, args.Progress, res.ContentLength) - + // Discard other output if file is written to stdout + out := args.Out if args.Stdout { - // Write file content to stdout - _, err := io.Copy(args.Out, srcReader) + out = ioutil.Discard + } + + // Path to file + fpath := filepath.Join(args.Path, f.Name) + + fmt.Fprintf(out, "Downloading %s -> %s\n", f.Name, fpath) + + bytes, rate, err := self.saveFile(saveFileArgs{ + out: args.Out, + body: res.Body, + contentLength: res.ContentLength, + fpath: fpath, + force: args.Force, + stdout: args.Stdout, + progress: args.Progress, + }) + + if err != nil { return err } - filename := filepath.Join(args.Path, f.Name) + fmt.Fprintf(out, "Download complete, rate: %s/s, total size: %s\n", formatSize(rate, false), formatSize(bytes, false)) + return nil +} + +type saveFileArgs struct { + out io.Writer + body io.Reader + contentLength int64 + fpath string + force bool + stdout bool + progress io.Writer +} + +func (self *Drive) saveFile(args saveFileArgs) (int64, int64, error) { + // Wrap response body in progress reader + srcReader := getProgressReader(args.body, args.progress, args.contentLength) + + if args.stdout { + // Write file content to stdout + _, err := io.Copy(args.out, srcReader) + return 0, 0, err + } // Check if file exists - if !args.Force && fileExists(filename) { - return fmt.Errorf("File '%s' already exists, use --force to overwrite", filename) + if !args.force && fileExists(args.fpath) { + return 0, 0, fmt.Errorf("File '%s' already exists, use --force to overwrite", args.fpath) } // Ensure any parent directories exists - if err = mkdir(filename); err != nil { - return err + if err := mkdir(args.fpath); err != nil { + return 0, 0, err } // Download to tmp file - tmpPath := filename + ".incomplete" + tmpPath := args.fpath + ".incomplete" // Create new file outFile, err := os.Create(tmpPath) if err != nil { - return fmt.Errorf("Unable to create new file: %s", err) + return 0, 0, fmt.Errorf("Unable to create new file: %s", err) } - fmt.Fprintf(args.Out, "\nDownloading %s...\n", f.Name) started := time.Now() // Save file to disk @@ -90,13 +128,11 @@ func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) error { if err != nil { outFile.Close() os.Remove(tmpPath) - return fmt.Errorf("Failed saving file: %s", err) + return 0, 0, fmt.Errorf("Failed saving file: %s", err) } // Calculate average download rate - rate := calcRate(f.Size, started, time.Now()) - - fmt.Fprintf(args.Out, "Downloaded '%s' at %s/s, total %s\n", filename, formatSize(rate, false), formatSize(bytes, false)) + rate := calcRate(bytes, started, time.Now()) //if deleteSourceFile { // self.Delete(args.Id) @@ -106,7 +142,7 @@ func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) error { outFile.Close() // Rename tmp file 
to proper filename - return os.Rename(tmpPath, filename) + return bytes, rate, os.Rename(tmpPath, args.fpath) } func (self *Drive) downloadDirectory(parent *drive.File, args DownloadArgs) error { diff --git a/drive/revision_download.go b/drive/revision_download.go index be641870..9cc9d1da 100644 --- a/drive/revision_download.go +++ b/drive/revision_download.go @@ -2,8 +2,9 @@ package drive import ( "fmt" + "path/filepath" "io" - "os" + "io/ioutil" ) type DownloadRevisionArgs struct { @@ -11,6 +12,7 @@ type DownloadRevisionArgs struct { Progress io.Writer FileId string RevisionId string + Path string Force bool Stdout bool } @@ -35,42 +37,31 @@ func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { // Close body on function exit defer res.Body.Close() - // Wrap response body in progress reader - srcReader := getProgressReader(res.Body, args.Progress, res.ContentLength) - + // Discard other output if file is written to stdout + out := args.Out if args.Stdout { - // Write file content to stdout - _, err := io.Copy(args.Out, srcReader) - return err + out = ioutil.Discard } - // Check if file exists - if !args.Force && fileExists(rev.OriginalFilename) { - return fmt.Errorf("File '%s' already exists, use --force to overwrite", rev.OriginalFilename) - } + // Path to file + fpath := filepath.Join(args.Path, rev.OriginalFilename) - // Download to tmp file - tmpPath := rev.OriginalFilename + ".incomplete" + fmt.Fprintf(out, "Downloading %s -> %s\n", rev.OriginalFilename, fpath) - // Create new file - outFile, err := os.Create(tmpPath) - if err != nil { - return fmt.Errorf("Unable to create new file: %s", err) - } + bytes, rate, err := self.saveFile(saveFileArgs{ + out: args.Out, + body: res.Body, + contentLength: res.ContentLength, + fpath: fpath, + force: args.Force, + stdout: args.Stdout, + progress: args.Progress, + }) - // Save file to disk - bytes, err := io.Copy(outFile, srcReader) if err != nil { - outFile.Close() - os.Remove(tmpPath) - return fmt.Errorf("Failed saving file: %s", err) + return err } - fmt.Fprintf(args.Out, "Downloaded '%s' at %s, total %d\n", rev.OriginalFilename, "x/s", bytes) - - // Close File - outFile.Close() - - // Rename tmp file to proper filename - return os.Rename(tmpPath, rev.OriginalFilename) + fmt.Fprintf(out, "Download complete, rate: %s/s, total size: %s\n", formatSize(rate, false), formatSize(bytes, false)) + return nil } diff --git a/gdrive.go b/gdrive.go index 36b4a09c..d286f1b9 100644 --- a/gdrive.go +++ b/gdrive.go @@ -577,6 +577,11 @@ func main() { Description: "Write file content to stdout", OmitValue: true, }, + cli.StringFlag{ + Name: "path", + Patterns: []string{"--path"}, + Description: "Download path", + }, ), }, }, diff --git a/handlers_drive.go b/handlers_drive.go index 9f85fd6c..fcb6cd0f 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -82,6 +82,7 @@ func downloadRevisionHandler(ctx cli.Context) { RevisionId: args.String("revisionId"), Force: args.Bool("force"), Stdout: args.Bool("stdout"), + Path: args.String("path"), Progress: progressWriter(args.Bool("noProgress")), }) checkErr(err) From 2b11e0bf5e54d748ec4bafb11ea7b08b6dc51294 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 14 Feb 2016 14:18:49 +0100 Subject: [PATCH 117/195] No need to open file --- drive/upload.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drive/upload.go b/drive/upload.go index 3a1a34c4..ed55cf1d 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -32,14 +32,9 @@ func (self *Drive) Upload(args 
UploadArgs) error { } func (self *Drive) upload(args UploadArgs) error { - f, err := os.Open(args.Path) + info, err := os.Stat(args.Path) if err != nil { - return fmt.Errorf("Failed to open file: %s", err) - } - - info, err := f.Stat() - if err != nil { - return fmt.Errorf("Failed getting file metadata: %s", err) + return fmt.Errorf("Failed stat file: %s", err) } if info.IsDir() && !args.Recursive { From 453c097b053b8a85395caf117136f7aec1d1ada4 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 14 Feb 2016 21:51:49 +0100 Subject: [PATCH 118/195] Change output text, refactoring --- drive/download.go | 61 ++++++++++++++++++++++++-------------------- drive/export.go | 10 ++++---- drive/import.go | 6 ++--- drive/mkdir.go | 4 +-- drive/update.go | 31 ++++++++++------------- drive/upload.go | 64 +++++++++++++++++++++++++---------------------- drive/util.go | 14 +++++++++++ gdrive.go | 6 ----- handlers_drive.go | 1 - 9 files changed, 103 insertions(+), 94 deletions(-) diff --git a/drive/download.go b/drive/download.go index d392a32a..3ed73df2 100644 --- a/drive/download.go +++ b/drive/download.go @@ -3,7 +3,6 @@ package drive import ( "fmt" "io" - "io/ioutil" "os" "time" "path/filepath" @@ -22,49 +21,64 @@ type DownloadArgs struct { } func (self *Drive) Download(args DownloadArgs) error { - return self.download(args) -} + if args.Recursive { + return self.downloadRecursive(args) + } -func (self *Drive) download(args DownloadArgs) error { f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do() if err != nil { return fmt.Errorf("Failed to get file: %s", err) } - if isDir(f) && !args.Recursive { + if isDir(f) { return fmt.Errorf("'%s' is a directory, use --recursive to download directories", f.Name) - } else if isDir(f) && args.Recursive { + } + + if !isBinary(f) { + return fmt.Errorf("'%s' is a google document and must be exported, see the export command", f.Name) + } + + bytes, rate, err := self.downloadBinary(f, args) + + if !args.Stdout { + fmt.Fprintf(args.Out, "Downloaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(bytes, false)) + } + return err +} + +func (self *Drive) downloadRecursive(args DownloadArgs) error { + f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } + + if isDir(f) { return self.downloadDirectory(f, args) } else if isBinary(f) { - return self.downloadBinary(f, args) - } else if !args.Recursive { - return fmt.Errorf("'%s' is a google document and must be exported, see the export command", f.Name) + _, _, err = self.downloadBinary(f, args) + return err } return nil } -func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) error { +func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) (int64, int64, error) { res, err := self.service.Files.Get(f.Id).Download() if err != nil { - return fmt.Errorf("Failed to download file: %s", err) + return 0, 0, fmt.Errorf("Failed to download file: %s", err) } // Close body on function exit defer res.Body.Close() - // Discard other output if file is written to stdout - out := args.Out - if args.Stdout { - out = ioutil.Discard - } - // Path to file fpath := filepath.Join(args.Path, f.Name) - fmt.Fprintf(out, "Downloading %s -> %s\n", f.Name, fpath) + if !args.Stdout { + fmt.Fprintf(args.Out, "Downloading %s -> %s\n", f.Name, fpath) + } - bytes, rate, err := self.saveFile(saveFileArgs{ + return self.saveFile(saveFileArgs{ 
out: args.Out, body: res.Body, contentLength: res.ContentLength, @@ -73,13 +87,6 @@ func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) error { stdout: args.Stdout, progress: args.Progress, }) - - if err != nil { - return err - } - - fmt.Fprintf(out, "Download complete, rate: %s/s, total size: %s\n", formatSize(rate, false), formatSize(bytes, false)) - return nil } type saveFileArgs struct { @@ -164,7 +171,7 @@ func (self *Drive) downloadDirectory(parent *drive.File, args DownloadArgs) erro newArgs.Id = f.Id newArgs.Stdout = false - err = self.download(newArgs) + err = self.downloadRecursive(newArgs) if err != nil { return err } diff --git a/drive/export.go b/drive/export.go index 2cbc265a..c90bc100 100644 --- a/drive/export.go +++ b/drive/export.go @@ -24,7 +24,7 @@ type ExportArgs struct { Force bool } -func (self *Drive) Export(args ExportArgs) (err error) { +func (self *Drive) Export(args ExportArgs) error { f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do() if err != nil { return fmt.Errorf("Failed to get file: %s", err) @@ -64,13 +64,13 @@ func (self *Drive) Export(args ExportArgs) (err error) { defer outFile.Close() // Save file to disk - bytes, err := io.Copy(outFile, res.Body) + _, err = io.Copy(outFile, res.Body) if err != nil { return fmt.Errorf("Failed saving file: %s", err) } - fmt.Fprintf(args.Out, "Exported '%s' at %s, total %d\n", filename, "x/s", bytes) - return + fmt.Fprintf(args.Out, "Exported '%s' with mime type: '%s'\n", filename, exportMime) + return nil } func (self *Drive) printMimes(out io.Writer, mimeType string) error { @@ -103,7 +103,7 @@ func getExportMime(userMime, fileMime string) (string, error) { func getExportFilename(name, mimeType string) string { extensions, err := mime.ExtensionsByType(mimeType) - if err != nil { + if err != nil || len(extensions) == 0 { return name } diff --git a/drive/import.go b/drive/import.go index cb82508f..258f8289 100644 --- a/drive/import.go +++ b/drive/import.go @@ -33,7 +33,7 @@ func (self *Drive) Import(args ImportArgs) error { return fmt.Errorf("Mime type '%s' is not supported for import", fromMime) } - f, err := self.uploadFile(UploadArgs{ + f, _, err := self.uploadFile(UploadArgs{ Out: ioutil.Discard, Progress: args.Progress, Path: args.Path, @@ -45,9 +45,7 @@ func (self *Drive) Import(args ImportArgs) error { return err } - fmt.Fprintf(args.Out, "[document] id: %s, name: %s\n", f.Id, f.Name) - fmt.Fprintf(args.Out, "Imported %s with mime type: '%s'\n", args.Path, toMimes[0]) - + fmt.Fprintf(args.Out, "Imported %s with mime type: '%s'\n", f.Id, toMimes[0]) return nil } diff --git a/drive/mkdir.go b/drive/mkdir.go index aef2276f..ad358a48 100644 --- a/drive/mkdir.go +++ b/drive/mkdir.go @@ -20,7 +20,7 @@ func (self *Drive) Mkdir(args MkdirArgs) error { if err != nil { return err } - fmt.Printf("Directory '%s' created\n", f.Name) + fmt.Fprintf(args.Out, "Directory %s created\n", f.Id) return nil } @@ -36,8 +36,6 @@ func (self *Drive) mkdir(args MkdirArgs) (*drive.File, error) { return nil, fmt.Errorf("Failed to create directory: %s", err) } - fmt.Fprintf(args.Out, "\n[directory] id: %s, name: %s\n", f.Id, f.Name) - //if args.Share { // self.share(TODO) //} diff --git a/drive/update.go b/drive/update.go index 806b7054..bd9fb4cc 100644 --- a/drive/update.go +++ b/drive/update.go @@ -3,7 +3,7 @@ package drive import ( "fmt" "mime" - "os" + "time" "io" "path/filepath" "google.golang.org/api/googleapi" @@ -19,25 +19,17 @@ type UpdateArgs struct { Parents []string Mime string Recursive bool 
- Stdin bool Share bool ChunkSize int64 } -func (self *Drive) Update(args UpdateArgs) (err error) { - //if args.Stdin { - // self.uploadStdin() - //} - - srcFile, err := os.Open(args.Path) +func (self *Drive) Update(args UpdateArgs) error { + srcFile, srcFileInfo, err := openFile(args.Path) if err != nil { return fmt.Errorf("Failed to open file: %s", err) } - srcFileInfo, err := srcFile.Stat() - if err != nil { - return fmt.Errorf("Failed to read file metadata: %s", err) - } + defer srcFile.Close() // Instantiate empty drive file dstFile := &drive.File{} @@ -65,14 +57,17 @@ func (self *Drive) Update(args UpdateArgs) (err error) { // Wrap file in progress reader srcReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) - f, err := self.service.Files.Update(args.Id, dstFile).Media(srcReader, chunkSize).Do() + fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) + started := time.Now() + + f, err := self.service.Files.Update(args.Id, dstFile).Fields("id", "name", "size").Media(srcReader, chunkSize).Do() if err != nil { return fmt.Errorf("Failed to upload file: %s", err) } - fmt.Fprintf(args.Out, "Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) - //if args.Share { - // self.Share(TODO) - //} - return + // Calculate average upload rate + rate := calcRate(f.Size, started, time.Now()) + + fmt.Fprintf(args.Out, "Updated %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) + return nil } diff --git a/drive/upload.go b/drive/upload.go index ed55cf1d..ad3cb349 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -28,22 +28,35 @@ func (self *Drive) Upload(args UploadArgs) error { return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) } - return self.upload(args) -} + if args.Recursive { + return self.uploadRecursive(args) + } -func (self *Drive) upload(args UploadArgs) error { info, err := os.Stat(args.Path) if err != nil { return fmt.Errorf("Failed stat file: %s", err) } - if info.IsDir() && !args.Recursive { + if info.IsDir() { return fmt.Errorf("'%s' is a directory, use --recursive to upload directories", info.Name()) - } else if info.IsDir() { + } + + f, rate, err := self.uploadFile(args) + fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) + return err +} + +func (self *Drive) uploadRecursive(args UploadArgs) error { + info, err := os.Stat(args.Path) + if err != nil { + return fmt.Errorf("Failed stat file: %s", err) + } + + if info.IsDir() { args.Name = "" return self.uploadDirectory(args) } else { - _, err := self.uploadFile(args) + _, _, err := self.uploadFile(args) return err } } @@ -57,6 +70,7 @@ func (self *Drive) uploadDirectory(args UploadArgs) error { // Close file on function exit defer srcFile.Close() + fmt.Fprintf(args.Out, "Creating directory %s\n", srcFileInfo.Name()) // Make directory on drive f, err := self.mkdir(MkdirArgs{ Out: args.Out, @@ -81,7 +95,7 @@ func (self *Drive) uploadDirectory(args UploadArgs) error { newArgs.Parents = []string{f.Id} // Upload - err = self.upload(newArgs) + err = self.uploadRecursive(newArgs) if err != nil { return err } @@ -90,10 +104,10 @@ func (self *Drive) uploadDirectory(args UploadArgs) error { return nil } -func (self *Drive) uploadFile(args UploadArgs) (*drive.File, error) { +func (self *Drive) uploadFile(args UploadArgs) (*drive.File, int64, error) { srcFile, srcFileInfo, err := openFile(args.Path) if err != nil { - return nil, err + return nil, 0, err } // Close file on function exit 
@@ -125,20 +139,18 @@ func (self *Drive) uploadFile(args UploadArgs) (*drive.File, error) { // Wrap file in progress reader srcReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) - fmt.Fprintf(args.Out, "\nUploading %s...\n", args.Path) + fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) started := time.Now() f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Media(srcReader, chunkSize).Do() if err != nil { - return nil, fmt.Errorf("Failed to upload file: %s", err) + return nil, 0, fmt.Errorf("Failed to upload file: %s", err) } // Calculate average upload rate rate := calcRate(f.Size, started, time.Now()) - fmt.Fprintf(args.Out, "[file] id: %s, md5: %s, name: %s\n", f.Id, f.Md5Checksum, f.Name) - fmt.Fprintf(args.Out, "Uploaded '%s' at %s/s, total %s\n", f.Name, formatSize(rate, false), formatSize(f.Size, false)) - return f, nil + return f, rate, nil } type UploadStreamArgs struct { @@ -170,28 +182,20 @@ func (self *Drive) UploadStream(args UploadStreamArgs) (err error) { // Chunk size option chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - f, err := self.service.Files.Create(dstFile).Media(args.In, chunkSize).Do() + fmt.Fprintf(args.Out, "Uploading %s\n", dstFile.Name) + started := time.Now() + + f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size").Media(args.In, chunkSize).Do() if err != nil { return fmt.Errorf("Failed to upload file: %s", err) } - fmt.Fprintf(args.Out, "Uploaded '%s' at %s, total %d\n", f.Name, "x/s", f.Size) + // Calculate average upload rate + rate := calcRate(f.Size, started, time.Now()) + + fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) //if args.Share { // self.Share(TODO) //} return } - -func openFile(path string) (*os.File, os.FileInfo, error) { - f, err := os.Open(path) - if err != nil { - return nil, nil, fmt.Errorf("Failed to open file: %s", err) - } - - info, err := f.Stat() - if err != nil { - return nil, nil, fmt.Errorf("Failed getting file metadata: %s", err) - } - - return f, info, nil -} diff --git a/drive/util.go b/drive/util.go index 7a7845a3..8891e121 100644 --- a/drive/util.go +++ b/drive/util.go @@ -153,3 +153,17 @@ func min(x int, y int) int { n := math.Min(float64(x), float64(y)) return int(n) } + +func openFile(path string) (*os.File, os.FileInfo, error) { + f, err := os.Open(path) + if err != nil { + return nil, nil, fmt.Errorf("Failed to open file: %s", err) + } + + info, err := f.Stat() + if err != nil { + return nil, nil, fmt.Errorf("Failed getting file metadata: %s", err) + } + + return f, info, nil +} diff --git a/gdrive.go b/gdrive.go index d286f1b9..ca076f57 100644 --- a/gdrive.go +++ b/gdrive.go @@ -219,12 +219,6 @@ func main() { Description: "Hide progress", OmitValue: true, }, - cli.BoolFlag{ - Name: "stdin", - Patterns: []string{"--stdin"}, - Description: "Use stdin as file content", - OmitValue: true, - }, cli.StringFlag{ Name: "mime", Patterns: []string{"--mime"}, diff --git a/handlers_drive.go b/handlers_drive.go index fcb6cd0f..8ba95307 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -144,7 +144,6 @@ func updateHandler(ctx cli.Context) { Name: args.String("name"), Parents: args.StringSlice("parent"), Mime: args.String("mime"), - Stdin: args.Bool("stdin"), Share: args.Bool("share"), Progress: progressWriter(args.Bool("noProgress")), ChunkSize: args.Int64("chunksize"), From ef097ce557abf04a4871429e313af52e13d31cec Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: 
Sun, 14 Feb 2016 23:10:57 +0100 Subject: [PATCH 119/195] Add progress bar to upload from stdin --- drive/progress.go | 9 +++++++-- drive/upload.go | 6 +++++- gdrive.go | 6 ++++++ handlers_drive.go | 1 + 4 files changed, 19 insertions(+), 3 deletions(-) diff --git a/drive/progress.go b/drive/progress.go index 9d4eb5a7..989191ef 100644 --- a/drive/progress.go +++ b/drive/progress.go @@ -12,7 +12,7 @@ const MaxRateInterval = time.Second * 3 func getProgressReader(r io.Reader, w io.Writer, size int64) io.Reader { // Don't wrap reader if output is discarded or size is too small - if w == ioutil.Discard || size < 1024 * 1024 { + if w == ioutil.Discard || (size > 0 && size < 1024 * 1024) { return r } @@ -79,7 +79,12 @@ func (self *Progress) draw(isLast bool) { self.clear() // Print progress - fmt.Fprintf(self.Writer, "%s/%s", formatSize(self.progress, false), formatSize(self.Size, false)) + fmt.Fprintf(self.Writer, "%s", formatSize(self.progress, false)) + + // Print total size + if self.Size > 0 { + fmt.Fprintf(self.Writer, "/%s", formatSize(self.Size, false)) + } // Print rate if self.rate > 0 { diff --git a/drive/upload.go b/drive/upload.go index ad3cb349..4f237caa 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -161,6 +161,7 @@ type UploadStreamArgs struct { Mime string Share bool ChunkSize int64 + Progress io.Writer } func (self *Drive) UploadStream(args UploadStreamArgs) (err error) { @@ -182,10 +183,13 @@ func (self *Drive) UploadStream(args UploadStreamArgs) (err error) { // Chunk size option chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + // Wrap file in progress reader + srcReader := getProgressReader(args.In, args.Progress, 0) + fmt.Fprintf(args.Out, "Uploading %s\n", dstFile.Name) started := time.Now() - f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size").Media(args.In, chunkSize).Do() + f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size").Media(srcReader, chunkSize).Do() if err != nil { return fmt.Errorf("Failed to upload file: %s", err) } diff --git a/gdrive.go b/gdrive.go index ca076f57..829b2396 100644 --- a/gdrive.go +++ b/gdrive.go @@ -193,6 +193,12 @@ func main() { Description: "Share file", OmitValue: true, }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, ), }, }, diff --git a/handlers_drive.go b/handlers_drive.go index 8ba95307..b994df5a 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -114,6 +114,7 @@ func uploadStdinHandler(ctx cli.Context) { Mime: args.String("mime"), Share: args.Bool("share"), ChunkSize: args.Int64("chunksize"), + Progress: progressWriter(args.Bool("noProgress")), }) checkErr(err) } From cd62c9d23d226b77111274144bdc81330013fdfc Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 15 Feb 2016 20:39:14 +0100 Subject: [PATCH 120/195] Show available types and roles --- gdrive.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gdrive.go b/gdrive.go index 829b2396..3b85a732 100644 --- a/gdrive.go +++ b/gdrive.go @@ -298,13 +298,13 @@ func main() { cli.StringFlag{ Name: "role", Patterns: []string{"--role"}, - Description: fmt.Sprintf("Share role. Default: %s", DefaultShareRole), + Description: fmt.Sprintf("Share role: owner/writer/commenter/reader, default: %s", DefaultShareRole), DefaultValue: DefaultShareRole, }, cli.StringFlag{ Name: "type", Patterns: []string{"--type"}, - Description: fmt.Sprintf("Share type. 
Default: %s", DefaultShareType), + Description: fmt.Sprintf("Share type: user/group/domain/anyone, default: %s", DefaultShareType), DefaultValue: DefaultShareType, }, cli.StringFlag{ From 713b0624b9f9ac2370a898f669a922f80a7fe8ea Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 15 Feb 2016 21:50:45 +0100 Subject: [PATCH 121/195] Add more share commands, upload + share --- drive/import.go | 2 - drive/mkdir.go | 5 --- drive/share.go | 95 +++++++++++++++++++++++++++++++++++------------ drive/update.go | 1 - drive/upload.go | 31 +++++++++++----- gdrive.go | 48 ++++++++++++------------ handlers_drive.go | 23 ++++++++++-- 7 files changed, 135 insertions(+), 70 deletions(-) diff --git a/drive/import.go b/drive/import.go index 258f8289..a3d8b3bd 100644 --- a/drive/import.go +++ b/drive/import.go @@ -13,7 +13,6 @@ type ImportArgs struct { Out io.Writer Progress io.Writer Path string - Share bool Parents []string } @@ -39,7 +38,6 @@ func (self *Drive) Import(args ImportArgs) error { Path: args.Path, Parents: args.Parents, Mime: toMimes[0], - Share: args.Share, }) if err != nil { return err diff --git a/drive/mkdir.go b/drive/mkdir.go index ad358a48..f6f0641c 100644 --- a/drive/mkdir.go +++ b/drive/mkdir.go @@ -12,7 +12,6 @@ type MkdirArgs struct { Out io.Writer Name string Parents []string - Share bool } func (self *Drive) Mkdir(args MkdirArgs) error { @@ -36,9 +35,5 @@ func (self *Drive) mkdir(args MkdirArgs) (*drive.File, error) { return nil, fmt.Errorf("Failed to create directory: %s", err) } - //if args.Share { - // self.share(TODO) - //} - return f, nil } diff --git a/drive/share.go b/drive/share.go index 43655df7..291512a8 100644 --- a/drive/share.go +++ b/drive/share.go @@ -3,6 +3,7 @@ package drive import ( "io" "fmt" + "text/tabwriter" "google.golang.org/api/drive/v3" ) @@ -13,17 +14,9 @@ type ShareArgs struct { Type string Email string Discoverable bool - Revoke bool } -func (self *Drive) Share(args ShareArgs) (err error) { - if args.Revoke { - err = self.deletePermissions(args) - if err != nil { - return fmt.Errorf("Failed delete permissions: %s", err) - } - } - +func (self *Drive) Share(args ShareArgs) error { permission := &drive.Permission{ AllowFileDiscovery: args.Discoverable, Role: args.Role, @@ -31,32 +24,86 @@ func (self *Drive) Share(args ShareArgs) (err error) { EmailAddress: args.Email, } - p, err := self.service.Permissions.Create(args.FileId, permission).Do() + _, err := self.service.Permissions.Create(args.FileId, permission).Do() + if err != nil { + return fmt.Errorf("Failed to share file: %s", err) + } + + fmt.Fprintf(args.Out, "Granted %s permission to %s\n", args.Role, args.Type) + return nil +} + +type RevokePermissionArgs struct { + Out io.Writer + FileId string + PermissionId string +} + +func (self *Drive) RevokePermission(args RevokePermissionArgs) error { + err := self.service.Permissions.Delete(args.FileId, args.PermissionId).Do() if err != nil { - return fmt.Errorf("Failed share file: %s", err) + fmt.Errorf("Failed to revoke permission: %s", err) + return err } - fmt.Fprintln(args.Out, p) - return + fmt.Fprintf(args.Out, "Permission revoked\n") + return nil +} + +type ListPermissionsArgs struct { + Out io.Writer + FileId string } -func (self *Drive) deletePermissions(args ShareArgs) error { - permList, err := self.service.Permissions.List(args.FileId).Do() +func (self *Drive) ListPermissions(args ListPermissionsArgs) error { + permList, err := 
self.service.Permissions.List(args.FileId).Fields("permissions(id,role,type,domain,emailAddress,allowFileDiscovery)").Do() if err != nil { + fmt.Errorf("Failed to list permissions: %s", err) return err } - for _, p := range permList.Permissions { - // Skip owner permissions - if p.Role == "owner" { - continue - } + printPermissions(printPermissionsArgs{ + out: args.Out, + permissions: permList.Permissions, + }) + return nil +} + +func (self *Drive) shareAnyoneReader(fileId string) error { + permission := &drive.Permission{ + Role: "reader", + Type: "anyone", + } - err := self.service.Permissions.Delete(args.FileId, p.Id).Do() - if err != nil { - return err - } + _, err := self.service.Permissions.Create(fileId, permission).Do() + if err != nil { + return fmt.Errorf("Failed to share file: %s", err) } return nil } + +type printPermissionsArgs struct { + out io.Writer + permissions []*drive.Permission +} + +func printPermissions(args printPermissionsArgs) { + w := new(tabwriter.Writer) + w.Init(args.out, 0, 0, 3, ' ', 0) + + fmt.Fprintln(w, "Id\tType\tRole\tEmail\tDomain\tDiscoverable") + + for _, p := range args.permissions { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", + p.Id, + p.Type, + p.Role, + p.EmailAddress, + p.Domain, + formatBool(p.AllowFileDiscovery), + ) + } + + w.Flush() +} diff --git a/drive/update.go b/drive/update.go index bd9fb4cc..c4ee3411 100644 --- a/drive/update.go +++ b/drive/update.go @@ -19,7 +19,6 @@ type UpdateArgs struct { Parents []string Mime string Recursive bool - Share bool ChunkSize int64 } diff --git a/drive/upload.go b/drive/upload.go index 4f237caa..898e3cd2 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -43,7 +43,16 @@ func (self *Drive) Upload(args UploadArgs) error { f, rate, err := self.uploadFile(args) fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) - return err + + if args.Share { + err = self.shareAnyoneReader(f.Id) + if err != nil { + return err + } + + fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink) + } + return nil } func (self *Drive) uploadRecursive(args UploadArgs) error { @@ -76,7 +85,6 @@ func (self *Drive) uploadDirectory(args UploadArgs) error { Out: args.Out, Name: srcFileInfo.Name(), Parents: args.Parents, - Share: args.Share, }) if err != nil { return err @@ -142,7 +150,7 @@ func (self *Drive) uploadFile(args UploadArgs) (*drive.File, int64, error) { fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) started := time.Now() - f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Media(srcReader, chunkSize).Do() + f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum", "webContentLink").Media(srcReader, chunkSize).Do() if err != nil { return nil, 0, fmt.Errorf("Failed to upload file: %s", err) } @@ -164,7 +172,7 @@ type UploadStreamArgs struct { Progress io.Writer } -func (self *Drive) UploadStream(args UploadStreamArgs) (err error) { +func (self *Drive) UploadStream(args UploadStreamArgs) error { if args.ChunkSize > intMax() - 1 { return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) } @@ -189,7 +197,7 @@ func (self *Drive) UploadStream(args UploadStreamArgs) (err error) { fmt.Fprintf(args.Out, "Uploading %s\n", dstFile.Name) started := time.Now() - f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size").Media(srcReader, chunkSize).Do() + f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", 
"webContentLink").Media(srcReader, chunkSize).Do() if err != nil { return fmt.Errorf("Failed to upload file: %s", err) } @@ -198,8 +206,13 @@ func (self *Drive) UploadStream(args UploadStreamArgs) (err error) { rate := calcRate(f.Size, started, time.Now()) fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) - //if args.Share { - // self.Share(TODO) - //} - return + if args.Share { + err = self.shareAnyoneReader(f.Id) + if err != nil { + return err + } + + fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink) + } + return nil } diff --git a/gdrive.go b/gdrive.go index 3b85a732..b600418c 100644 --- a/gdrive.go +++ b/gdrive.go @@ -230,12 +230,6 @@ func main() { Patterns: []string{"--mime"}, Description: "Force mime type", }, - cli.BoolFlag{ - Name: "share", - Patterns: []string{"--share"}, - Description: "Share file", - OmitValue: true, - }, cli.IntFlag{ Name: "chunksize", Patterns: []string{"--chunksize"}, @@ -273,12 +267,6 @@ func main() { Patterns: []string{"-p", "--parent"}, Description: "Parent id of created directory, can be specified multiple times to give many parents", }, - cli.BoolFlag{ - Name: "share", - Patterns: []string{"--share"}, - Description: "Share created directory", - OmitValue: true, - }, ), }, }, @@ -289,12 +277,6 @@ func main() { FlagGroups: cli.FlagGroups{ cli.NewFlagGroup("global", globalFlags...), cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "discoverable", - Patterns: []string{"--discoverable"}, - Description: "Make file discoverable by search engines", - OmitValue: true, - }, cli.StringFlag{ Name: "role", Patterns: []string{"--role"}, @@ -312,15 +294,37 @@ func main() { Patterns: []string{"--email"}, Description: "The email address of the user or group to share the file with. 
Requires 'user' or 'group' as type", }, + cli.BoolFlag{ + Name: "discoverable", + Patterns: []string{"--discoverable"}, + Description: "Make file discoverable by search engines", + OmitValue: true, + }, cli.BoolFlag{ Name: "revoke", Patterns: []string{"--revoke"}, - Description: "Delete all sharing permissions", + Description: "Delete all sharing permissions (owner roles will be skipped)", OmitValue: true, }, ), }, }, + &cli.Handler{ + Pattern: "[global] share list ", + Description: "List files permissions", + Callback: shareListHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + }, + }, + &cli.Handler{ + Pattern: "[global] share revoke ", + Description: "Revoke permission", + Callback: shareRevokeHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + }, + }, &cli.Handler{ Pattern: "[global] delete [options] ", Description: "Delete file or directory", @@ -611,12 +615,6 @@ func main() { Description: "Hide progress", OmitValue: true, }, - cli.BoolFlag{ - Name: "share", - Patterns: []string{"--share"}, - Description: "Share file", - OmitValue: true, - }, ), }, }, diff --git a/handlers_drive.go b/handlers_drive.go index b994df5a..de929a4c 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -145,7 +145,6 @@ func updateHandler(ctx cli.Context) { Name: args.String("name"), Parents: args.StringSlice("parent"), Mime: args.String("mime"), - Share: args.Bool("share"), Progress: progressWriter(args.Bool("noProgress")), ChunkSize: args.Int64("chunksize"), }) @@ -168,7 +167,6 @@ func importHandler(ctx cli.Context) { Out: os.Stdout, Path: args.String("path"), Parents: args.StringSlice("parent"), - Share: args.Bool("share"), Progress: progressWriter(args.Bool("noProgress")), }) checkErr(err) @@ -204,7 +202,6 @@ func mkdirHandler(ctx cli.Context) { Out: os.Stdout, Name: args.String("name"), Parents: args.StringSlice("parent"), - Share: args.Bool("share"), }) checkErr(err) } @@ -218,7 +215,25 @@ func shareHandler(ctx cli.Context) { Type: args.String("type"), Email: args.String("email"), Discoverable: args.Bool("discoverable"), - Revoke: args.Bool("revoke"), + }) + checkErr(err) +} + +func shareListHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).ListPermissions(drive.ListPermissionsArgs{ + Out: os.Stdout, + FileId: args.String("fileId"), + }) + checkErr(err) +} + +func shareRevokeHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).RevokePermission(drive.RevokePermissionArgs{ + Out: os.Stdout, + FileId: args.String("fileId"), + PermissionId: args.String("permissionId"), }) checkErr(err) } From 3f8dc6312cc9b14897ff49439fb023e16e5f47d0 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 15 Feb 2016 21:55:42 +0100 Subject: [PATCH 122/195] s/id/fileId/ --- gdrive.go | 20 ++++++++++---------- handlers_drive.go | 20 ++++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/gdrive.go b/gdrive.go index b600418c..8b2ea979 100644 --- a/gdrive.go +++ b/gdrive.go @@ -77,7 +77,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] download [options] ", + Pattern: "[global] download [options] ", Description: "Download file or directory", Callback: downloadHandler, FlagGroups: cli.FlagGroups{ @@ -203,7 +203,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] update [options] ", + Pattern: "[global] update [options] ", Description: "Update file, this creates a new revision of the file", Callback: updateHandler, FlagGroups: cli.FlagGroups{ @@ -240,7 +240,7 @@ func 
main() { }, }, &cli.Handler{ - Pattern: "[global] info [options] ", + Pattern: "[global] info [options] ", Description: "Show file info", Callback: infoHandler, FlagGroups: cli.FlagGroups{ @@ -271,7 +271,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] share [options] ", + Pattern: "[global] share [options] ", Description: "Share file or directory", Callback: shareHandler, FlagGroups: cli.FlagGroups{ @@ -326,7 +326,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] delete [options] ", + Pattern: "[global] delete [options] ", Description: "Delete file or directory", Callback: deleteHandler, FlagGroups: cli.FlagGroups{ @@ -358,7 +358,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] sync list recursive [options] ", + Pattern: "[global] sync list recursive [options] ", Description: "List content of syncable directory", Callback: listRecursiveSyncHandler, FlagGroups: cli.FlagGroups{ @@ -391,7 +391,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] sync download [options] ", + Pattern: "[global] sync download [options] ", Description: "Sync drive directory to local directory", Callback: downloadSyncHandler, FlagGroups: cli.FlagGroups{ @@ -437,7 +437,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] sync upload [options] ", + Pattern: "[global] sync upload [options] ", Description: "Sync local directory to drive", Callback: uploadSyncHandler, FlagGroups: cli.FlagGroups{ @@ -529,7 +529,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] revision list [options] ", + Pattern: "[global] revision list [options] ", Description: "List file revisions", Callback: listRevisionsHandler, FlagGroups: cli.FlagGroups{ @@ -619,7 +619,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] export [options] ", + Pattern: "[global] export [options] ", Description: "Export a google document", Callback: exportHandler, FlagGroups: cli.FlagGroups{ diff --git a/handlers_drive.go b/handlers_drive.go index de929a4c..28d3095a 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -48,7 +48,7 @@ func downloadHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Download(drive.DownloadArgs{ Out: os.Stdout, - Id: args.String("id"), + Id: args.String("fileId"), Force: args.Bool("force"), Path: args.String("path"), Recursive: args.Bool("recursive"), @@ -65,7 +65,7 @@ func downloadSyncHandler(ctx cli.Context) { Out: os.Stdout, Progress: progressWriter(args.Bool("noProgress")), Path: args.String("path"), - RootId: args.String("id"), + RootId: args.String("fileId"), DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), Resolution: conflictResolution(args), @@ -126,7 +126,7 @@ func uploadSyncHandler(ctx cli.Context) { Out: os.Stdout, Progress: progressWriter(args.Bool("noProgress")), Path: args.String("path"), - RootId: args.String("id"), + RootId: args.String("fileId"), DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), ChunkSize: args.Int64("chunksize"), @@ -140,7 +140,7 @@ func updateHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Update(drive.UpdateArgs{ Out: os.Stdout, - Id: args.String("id"), + Id: args.String("fileId"), Path: args.String("path"), Name: args.String("name"), Parents: args.StringSlice("parent"), @@ -155,7 +155,7 @@ func infoHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Info(drive.FileInfoArgs{ Out: os.Stdout, - Id: args.String("id"), + Id: args.String("fileId"), SizeInBytes: args.Bool("sizeInBytes"), }) checkErr(err) @@ -176,7 +176,7 @@ func 
exportHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Export(drive.ExportArgs{ Out: os.Stdout, - Id: args.String("id"), + Id: args.String("fileId"), Mime: args.String("mime"), PrintMimes: args.Bool("printMimes"), Force: args.Bool("force"), @@ -188,7 +188,7 @@ func listRevisionsHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).ListRevisions(drive.ListRevisionsArgs{ Out: os.Stdout, - Id: args.String("id"), + Id: args.String("fileId"), NameWidth: args.Int64("nameWidth"), SizeInBytes: args.Bool("sizeInBytes"), SkipHeader: args.Bool("skipHeader"), @@ -210,7 +210,7 @@ func shareHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Share(drive.ShareArgs{ Out: os.Stdout, - FileId: args.String("id"), + FileId: args.String("fileId"), Role: args.String("role"), Type: args.String("type"), Email: args.String("email"), @@ -242,7 +242,7 @@ func deleteHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Delete(drive.DeleteArgs{ Out: os.Stdout, - Id: args.String("id"), + Id: args.String("fileId"), Recursive: args.Bool("recursive"), }) checkErr(err) @@ -261,7 +261,7 @@ func listRecursiveSyncHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).ListRecursiveSync(drive.ListRecursiveSyncArgs{ Out: os.Stdout, - RootId: args.String("id"), + RootId: args.String("fileId"), SkipHeader: args.Bool("skipHeader"), PathWidth: args.Int64("pathWidth"), SizeInBytes: args.Bool("sizeInBytes"), From a44ec784400bddfd489f86d77e72eb5ee06600fd Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 16 Feb 2016 21:22:30 +0100 Subject: [PATCH 123/195] Error handling --- auth/oauth.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/auth/oauth.go b/auth/oauth.go index b8f1d475..a9ab10ef 100644 --- a/auth/oauth.go +++ b/auth/oauth.go @@ -1,6 +1,7 @@ package auth import ( + "fmt" "net/http" "golang.org/x/oauth2" ) @@ -22,7 +23,7 @@ func NewOauthClient(clientId, clientSecret, tokenFile string, authFn authCodeFn) // Read cached token token, exists, err := ReadToken(tokenFile) if err != nil { - return nil, err + return nil, fmt.Errorf("Failed to read token: %s", err) } // Require auth code if token file does not exist @@ -31,6 +32,9 @@ func NewOauthClient(clientId, clientSecret, tokenFile string, authFn authCodeFn) authUrl := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) authCode := authFn(authUrl)() token, err = conf.Exchange(oauth2.NoContext, authCode) + if err != nil { + return nil, fmt.Errorf("Failed to exchange auth code for token: %s", err) + } } return oauth2.NewClient( From 5d561380f73f2b1bcb663191e363a752cc097dcc Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 16 Feb 2016 22:17:54 +0100 Subject: [PATCH 124/195] Add delete option for upload and downloads --- drive/delete.go | 8 ++++++++ drive/download.go | 12 ++++++++++++ drive/upload.go | 10 ++++++++++ gdrive.go | 12 ++++++++++++ handlers_drive.go | 20 ++++++++++++++++++++ 5 files changed, 62 insertions(+) diff --git a/drive/delete.go b/drive/delete.go index d2469e1c..bacd4a34 100644 --- a/drive/delete.go +++ b/drive/delete.go @@ -29,3 +29,11 @@ func (self *Drive) Delete(args DeleteArgs) error { fmt.Fprintf(args.Out, "Deleted '%s'\n", f.Name) return nil } + +func (self *Drive) deleteFile(fileId string) error { + err := self.service.Files.Delete(fileId).Do() + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } + return nil +} diff --git a/drive/download.go b/drive/download.go index 3ed73df2..7d3dc8b3 100644 --- a/drive/download.go +++ 
b/drive/download.go @@ -17,6 +17,7 @@ type DownloadArgs struct { Path string Force bool Recursive bool + Delete bool Stdout bool } @@ -43,6 +44,17 @@ func (self *Drive) Download(args DownloadArgs) error { if !args.Stdout { fmt.Fprintf(args.Out, "Downloaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(bytes, false)) } + + if args.Delete { + err = self.deleteFile(args.Id) + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } + + if !args.Stdout { + fmt.Fprintf(args.Out, "Removed %s\n", args.Id) + } + } return err } diff --git a/drive/upload.go b/drive/upload.go index 898e3cd2..095c2844 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -20,6 +20,7 @@ type UploadArgs struct { Mime string Recursive bool Share bool + Delete bool ChunkSize int64 } @@ -52,6 +53,15 @@ func (self *Drive) Upload(args UploadArgs) error { fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink) } + + if args.Delete { + err = os.Remove(args.Path) + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } + fmt.Fprintf(args.Out, "Removed %s\n", args.Path) + } + return nil } diff --git a/gdrive.go b/gdrive.go index 8b2ea979..443d677d 100644 --- a/gdrive.go +++ b/gdrive.go @@ -100,6 +100,12 @@ func main() { Patterns: []string{"--path"}, Description: "Download path", }, + cli.BoolFlag{ + Name: "delete", + Patterns: []string{"--delete"}, + Description: "Delete remote file when download is successful", + OmitValue: true, + }, cli.BoolFlag{ Name: "noProgress", Patterns: []string{"--no-progress"}, @@ -155,6 +161,12 @@ func main() { Description: "Share file", OmitValue: true, }, + cli.BoolFlag{ + Name: "delete", + Patterns: []string{"--delete"}, + Description: "Delete local file when upload is successful", + OmitValue: true, + }, cli.IntFlag{ Name: "chunksize", Patterns: []string{"--chunksize"}, diff --git a/handlers_drive.go b/handlers_drive.go index 28d3095a..13ba9c02 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -46,11 +46,13 @@ func listChangesHandler(ctx cli.Context) { func downloadHandler(ctx cli.Context) { args := ctx.Args() + checkDownloadArgs(args) err := newDrive(args).Download(drive.DownloadArgs{ Out: os.Stdout, Id: args.String("fileId"), Force: args.Bool("force"), Path: args.String("path"), + Delete: args.Bool("delete"), Recursive: args.Bool("recursive"), Stdout: args.Bool("stdout"), Progress: progressWriter(args.Bool("noProgress")), @@ -90,6 +92,7 @@ func downloadRevisionHandler(ctx cli.Context) { func uploadHandler(ctx cli.Context) { args := ctx.Args() + checkUploadArgs(args) err := newDrive(args).Upload(drive.UploadArgs{ Out: os.Stdout, Progress: progressWriter(args.Bool("noProgress")), @@ -99,6 +102,7 @@ func uploadHandler(ctx cli.Context) { Mime: args.String("mime"), Recursive: args.Bool("recursive"), Share: args.Bool("share"), + Delete: args.Bool("delete"), ChunkSize: args.Int64("chunksize"), }) checkErr(err) @@ -366,3 +370,19 @@ func conflictResolution(args cli.Arguments) drive.ConflictResolution { return drive.NoResolution } + +func checkUploadArgs(args cli.Arguments) { + if args.Bool("recursive") && args.Bool("delete") { + ExitF("--delete is not allowed for recursive uploads") + } + + if args.Bool("recursive") && args.Bool("share") { + ExitF("--share is not allowed for recursive uploads") + } +} + +func checkDownloadArgs(args cli.Arguments) { + if args.Bool("recursive") && args.Bool("delete") { + ExitF("--delete is not allowed for recursive downloads") + } +} From 7eaf0c84ebb01c84f39ed93d69a7b856d8c633ce Mon Sep 
17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 20 Feb 2016 22:47:55 +0100 Subject: [PATCH 125/195] Add TimeoutReader TimeoutReader wraps a reader and takes a cancel function as argument, the cancel function will be called when the reader is idle for too long. --- drive/timeout_reader.go | 86 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 drive/timeout_reader.go diff --git a/drive/timeout_reader.go b/drive/timeout_reader.go new file mode 100644 index 00000000..e228160f --- /dev/null +++ b/drive/timeout_reader.go @@ -0,0 +1,86 @@ +package drive + +import ( + "io" + "time" + "sync" + "golang.org/x/net/context" +) + +const MaxIdleTimeout = time.Second * 120 +const TimeoutTimerInterval = time.Second * 10 + +func getTimeoutReader(r io.Reader, cancel context.CancelFunc) io.Reader { + return &TimeoutReader{ + reader: r, + cancel: cancel, + mutex: &sync.Mutex{}, + } +} + +type TimeoutReader struct { + reader io.Reader + cancel context.CancelFunc + lastActivity time.Time + timer *time.Timer + mutex *sync.Mutex + done bool +} + +func (self *TimeoutReader) Read(p []byte) (int, error) { + if self.timer == nil { + self.startTimer() + } + + self.mutex.Lock() + + // Read + n, err := self.reader.Read(p) + + self.lastActivity = time.Now() + self.done = (err != nil) + + self.mutex.Unlock() + + if self.done { + self.stopTimer() + } + + return n, err +} + +func (self *TimeoutReader) startTimer() { + self.mutex.Lock() + defer self.mutex.Unlock() + + if !self.done { + self.timer = time.AfterFunc(TimeoutTimerInterval, self.timeout) + } +} + +func (self *TimeoutReader) stopTimer() { + self.mutex.Lock() + defer self.mutex.Unlock() + + if self.timer != nil { + self.timer.Stop() + } +} + +func (self *TimeoutReader) timeout() { + self.mutex.Lock() + + if self.done { + self.mutex.Unlock() + return + } + + if time.Since(self.lastActivity) > MaxIdleTimeout { + self.cancel() + self.mutex.Unlock() + return + } + + self.mutex.Unlock() + self.startTimer() +} From 308c7dceac93e7496453332139aac006da408629 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 20 Feb 2016 22:55:53 +0100 Subject: [PATCH 126/195] Handle error --- drive/upload.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drive/upload.go b/drive/upload.go index 095c2844..05f52cde 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -43,6 +43,9 @@ func (self *Drive) Upload(args UploadArgs) error { } f, rate, err := self.uploadFile(args) + if err != nil { + return err + } fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) if args.Share { From a9e9da783481fcb8022eb52fb944cb9ee13997de Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 20 Feb 2016 23:11:23 +0100 Subject: [PATCH 127/195] Wrap media uploads in TimeoutReader --- drive/sync_upload.go | 14 ++++++++++---- drive/timeout_reader.go | 5 +++++ drive/update.go | 7 +++++-- drive/upload.go | 14 ++++++++++---- 4 files changed, 30 insertions(+), 10 deletions(-) diff --git a/drive/sync_upload.go b/drive/sync_upload.go index a94e5070..7a0833a9 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -305,9 +305,12 @@ func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args Upload chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) // Wrap file in progress reader - srcReader := getProgressReader(srcFile, args.Progress, lf.info.Size()) + progressReader := getProgressReader(srcFile, args.Progress, lf.info.Size()) - _, err = 
self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Media(srcReader, chunkSize).Do() + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) + + _, err = self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Context(ctx).Media(reader, chunkSize).Do() if err != nil { if isBackendError(err) && try < MaxBackendErrorRetries { exponentialBackoffSleep(try) @@ -341,9 +344,12 @@ func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs, try i chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) // Wrap file in progress reader - srcReader := getProgressReader(srcFile, args.Progress, cf.local.info.Size()) + progressReader := getProgressReader(srcFile, args.Progress, cf.local.info.Size()) + + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) - _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Media(srcReader, chunkSize).Do() + _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Context(ctx).Media(reader, chunkSize).Do() if err != nil { if isBackendError(err) && try < MaxBackendErrorRetries { exponentialBackoffSleep(try) diff --git a/drive/timeout_reader.go b/drive/timeout_reader.go index e228160f..ba2bb834 100644 --- a/drive/timeout_reader.go +++ b/drive/timeout_reader.go @@ -10,6 +10,11 @@ import ( const MaxIdleTimeout = time.Second * 120 const TimeoutTimerInterval = time.Second * 10 +func getTimeoutReaderContext(r io.Reader) (io.Reader, context.Context) { + ctx, cancel := context.WithCancel(context.TODO()) + return getTimeoutReader(r, cancel), ctx +} + func getTimeoutReader(r io.Reader, cancel context.CancelFunc) io.Reader { return &TimeoutReader{ reader: r, diff --git a/drive/update.go b/drive/update.go index c4ee3411..5bdd0408 100644 --- a/drive/update.go +++ b/drive/update.go @@ -54,12 +54,15 @@ func (self *Drive) Update(args UpdateArgs) error { chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) // Wrap file in progress reader - srcReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) + progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) + + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) started := time.Now() - f, err := self.service.Files.Update(args.Id, dstFile).Fields("id", "name", "size").Media(srcReader, chunkSize).Do() + f, err := self.service.Files.Update(args.Id, dstFile).Fields("id", "name", "size").Context(ctx).Media(reader, chunkSize).Do() if err != nil { return fmt.Errorf("Failed to upload file: %s", err) } diff --git a/drive/upload.go b/drive/upload.go index 05f52cde..2b8c7c36 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -158,12 +158,15 @@ func (self *Drive) uploadFile(args UploadArgs) (*drive.File, int64, error) { chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) // Wrap file in progress reader - srcReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) + progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) + + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) started := time.Now() - f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum", "webContentLink").Media(srcReader, chunkSize).Do() + f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum", 
"webContentLink").Context(ctx).Media(reader, chunkSize).Do() if err != nil { return nil, 0, fmt.Errorf("Failed to upload file: %s", err) } @@ -205,12 +208,15 @@ func (self *Drive) UploadStream(args UploadStreamArgs) error { chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) // Wrap file in progress reader - srcReader := getProgressReader(args.In, args.Progress, 0) + progressReader := getProgressReader(args.In, args.Progress, 0) + + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) fmt.Fprintf(args.Out, "Uploading %s\n", dstFile.Name) started := time.Now() - f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "webContentLink").Media(srcReader, chunkSize).Do() + f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "webContentLink").Context(ctx).Media(reader, chunkSize).Do() if err != nil { return fmt.Errorf("Failed to upload file: %s", err) } From 28c4eb923fd01d892a17844328d0090830bcd229 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 20 Feb 2016 23:30:30 +0100 Subject: [PATCH 128/195] Wrap downloads in TimeoutReader --- drive/download.go | 7 +++++-- drive/revision_download.go | 7 +++++-- drive/sync_download.go | 12 +++++++++--- drive/timeout_reader.go | 10 ++++++++++ 4 files changed, 29 insertions(+), 7 deletions(-) diff --git a/drive/download.go b/drive/download.go index 7d3dc8b3..a33373fe 100644 --- a/drive/download.go +++ b/drive/download.go @@ -75,7 +75,10 @@ func (self *Drive) downloadRecursive(args DownloadArgs) error { } func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) (int64, int64, error) { - res, err := self.service.Files.Get(f.Id).Download() + // Get timeout reader wrapper and context + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + + res, err := self.service.Files.Get(f.Id).Context(ctx).Download() if err != nil { return 0, 0, fmt.Errorf("Failed to download file: %s", err) } @@ -92,7 +95,7 @@ func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) (int64, int6 return self.saveFile(saveFileArgs{ out: args.Out, - body: res.Body, + body: timeoutReaderWrapper(res.Body), contentLength: res.ContentLength, fpath: fpath, force: args.Force, diff --git a/drive/revision_download.go b/drive/revision_download.go index 9cc9d1da..039cd19e 100644 --- a/drive/revision_download.go +++ b/drive/revision_download.go @@ -29,7 +29,10 @@ func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { return fmt.Errorf("Download is not supported for this file type") } - res, err := getRev.Download() + // Get timeout reader wrapper and context + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + + res, err := getRev.Context(ctx).Download() if err != nil { return fmt.Errorf("Failed to download file: %s", err) } @@ -50,7 +53,7 @@ func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { bytes, rate, err := self.saveFile(saveFileArgs{ out: args.Out, - body: res.Body, + body: timeoutReaderWrapper(res.Body), contentLength: res.ContentLength, fpath: fpath, force: args.Force, diff --git a/drive/sync_download.go b/drive/sync_download.go index fb7b3ae8..5016cc14 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -187,7 +187,10 @@ func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, t return nil } - res, err := self.service.Files.Get(id).Download() + // Get timeout reader wrapper and context + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + + res, err := 
self.service.Files.Get(id).Context(ctx).Download() if err != nil { if isBackendError(err) && try < MaxBackendErrorRetries { exponentialBackoffSleep(try) @@ -202,7 +205,10 @@ func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, t defer res.Body.Close() // Wrap response body in progress reader - srcReader := getProgressReader(res.Body, args.Progress, res.ContentLength) + progressReader := getProgressReader(res.Body, args.Progress, res.ContentLength) + + // Wrap reader in timeout reader + reader := timeoutReaderWrapper(progressReader) // Ensure any parent directories exists if err = mkdir(fpath); err != nil { @@ -219,7 +225,7 @@ func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, t } // Save file to disk - _, err = io.Copy(outFile, srcReader) + _, err = io.Copy(outFile, reader) if err != nil { outFile.Close() if try < MaxBackendErrorRetries { diff --git a/drive/timeout_reader.go b/drive/timeout_reader.go index ba2bb834..878911bd 100644 --- a/drive/timeout_reader.go +++ b/drive/timeout_reader.go @@ -10,6 +10,16 @@ import ( const MaxIdleTimeout = time.Second * 120 const TimeoutTimerInterval = time.Second * 10 +type timeoutReaderWrapper func(io.Reader) io.Reader + +func getTimeoutReaderWrapperContext() (timeoutReaderWrapper, context.Context) { + ctx, cancel := context.WithCancel(context.TODO()) + wrapper := func(r io.Reader) io.Reader { + return getTimeoutReader(r, cancel) + } + return wrapper, ctx +} + func getTimeoutReaderContext(r io.Reader) (io.Reader, context.Context) { ctx, cancel := context.WithCancel(context.TODO()) return getTimeoutReader(r, cancel), ctx From 21cc148ec34cdb65cbbca0dfb2995beea7cbc11b Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 00:37:17 +0100 Subject: [PATCH 129/195] Add support for user-provided refresh token --- auth/oauth.go | 42 +++++++++++++++++++++++++++++++----------- gdrive.go | 5 +++++ handlers_drive.go | 13 +++++++++++-- 3 files changed, 47 insertions(+), 13 deletions(-) diff --git a/auth/oauth.go b/auth/oauth.go index a9ab10ef..1f2884b7 100644 --- a/auth/oauth.go +++ b/auth/oauth.go @@ -2,23 +2,15 @@ package auth import ( "fmt" + "time" "net/http" "golang.org/x/oauth2" ) type authCodeFn func(string) func() string -func NewOauthClient(clientId, clientSecret, tokenFile string, authFn authCodeFn) (*http.Client, error) { - conf := &oauth2.Config{ - ClientID: clientId, - ClientSecret: clientSecret, - Scopes: []string{"https://www.googleapis.com/auth/drive"}, - RedirectURL: "urn:ietf:wg:oauth:2.0:oob", - Endpoint: oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", - }, - } +func NewFileSourceClient(clientId, clientSecret, tokenFile string, authFn authCodeFn) (*http.Client, error) { + conf := getConfig(clientId, clientSecret) // Read cached token token, exists, err := ReadToken(tokenFile) @@ -42,3 +34,31 @@ func NewOauthClient(clientId, clientSecret, tokenFile string, authFn authCodeFn) FileSource(tokenFile, token, conf), ), nil } + +func NewRefreshTokenClient(clientId, clientSecret, refreshToken string) *http.Client { + conf := getConfig(clientId, clientSecret) + + token := &oauth2.Token{ + TokenType: "Bearer", + RefreshToken: refreshToken, + Expiry: time.Now(), + } + + return oauth2.NewClient( + oauth2.NoContext, + conf.TokenSource(oauth2.NoContext, token), + ) +} + +func getConfig(clientId, clientSecret string) *oauth2.Config { + return &oauth2.Config{ + ClientID: clientId, + ClientSecret: clientSecret, + 
Scopes: []string{"https://www.googleapis.com/auth/drive"}, + RedirectURL: "urn:ietf:wg:oauth:2.0:oob", + Endpoint: oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", + }, + } +} diff --git a/gdrive.go b/gdrive.go index 443d677d..3218a0c4 100644 --- a/gdrive.go +++ b/gdrive.go @@ -28,6 +28,11 @@ func main() { Description: fmt.Sprintf("Application path, default: %s", DefaultConfigDir), DefaultValue: DefaultConfigDir, }, + cli.StringFlag{ + Name: "refreshToken", + Patterns: []string{"--refresh-token"}, + Description: "Oauth refresh token used to get access token (for advanced users)", + }, } handlers := []*cli.Handler{ diff --git a/handlers_drive.go b/handlers_drive.go index 13ba9c02..4fa00cfd 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -6,6 +6,7 @@ import ( "io" "io/ioutil" "path/filepath" + "net/http" "./cli" "./auth" "./drive" @@ -309,10 +310,18 @@ func aboutExportHandler(ctx cli.Context) { checkErr(err) } -func newDrive(args cli.Arguments) *drive.Drive { +func getOauthClient(args cli.Arguments) (*http.Client, error) { + if args.String("refreshToken") != "" { + return auth.NewRefreshTokenClient(ClientId, ClientSecret, args.String("refreshToken")), nil + } + configDir := args.String("configDir") tokenPath := ConfigFilePath(configDir, TokenFilename) - oauth, err := auth.NewOauthClient(ClientId, ClientSecret, tokenPath, authCodePrompt) + return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt) +} + +func newDrive(args cli.Arguments) *drive.Drive { + oauth, err := getOauthClient(args) if err != nil { ExitF("Failed getting oauth client: %s", err.Error()) } From c1960cd2b0fc82054ebe64c6ec79555b991a4ff9 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 00:53:32 +0100 Subject: [PATCH 130/195] Add support for user-provided access token --- auth/oauth.go | 14 ++++++++++++++ gdrive.go | 5 +++++ handlers_drive.go | 8 ++++++++ 3 files changed, 27 insertions(+) diff --git a/auth/oauth.go b/auth/oauth.go index 1f2884b7..965c7cc2 100644 --- a/auth/oauth.go +++ b/auth/oauth.go @@ -50,6 +50,20 @@ func NewRefreshTokenClient(clientId, clientSecret, refreshToken string) *http.Cl ) } +func NewAccessTokenClient(clientId, clientSecret, accessToken string) *http.Client { + conf := getConfig(clientId, clientSecret) + + token := &oauth2.Token{ + TokenType: "Bearer", + AccessToken: accessToken, + } + + return oauth2.NewClient( + oauth2.NoContext, + conf.TokenSource(oauth2.NoContext, token), + ) +} + func getConfig(clientId, clientSecret string) *oauth2.Config { return &oauth2.Config{ ClientID: clientId, diff --git a/gdrive.go b/gdrive.go index 3218a0c4..f89688b1 100644 --- a/gdrive.go +++ b/gdrive.go @@ -33,6 +33,11 @@ func main() { Patterns: []string{"--refresh-token"}, Description: "Oauth refresh token used to get access token (for advanced users)", }, + cli.StringFlag{ + Name: "accessToken", + Patterns: []string{"--access-token"}, + Description: "Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users)", + }, } handlers := []*cli.Handler{ diff --git a/handlers_drive.go b/handlers_drive.go index 4fa00cfd..3ca2c6ad 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -311,10 +311,18 @@ func aboutExportHandler(ctx cli.Context) { } func getOauthClient(args cli.Arguments) (*http.Client, error) { + if args.String("refreshToken") != "" && args.String("accessToken") != "" { + ExitF("Access token not needed when refresh token is 
provided") + } + if args.String("refreshToken") != "" { return auth.NewRefreshTokenClient(ClientId, ClientSecret, args.String("refreshToken")), nil } + if args.String("accessToken") != "" { + return auth.NewAccessTokenClient(ClientId, ClientSecret, args.String("accessToken")), nil + } + configDir := args.String("configDir") tokenPath := ConfigFilePath(configDir, TokenFilename) return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt) From 78207dc0823431ba86fd9179df808b7ab90d97bb Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 00:55:02 +0100 Subject: [PATCH 131/195] Rename --- auth/{token.go => file_source.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename auth/{token.go => file_source.go} (100%) diff --git a/auth/token.go b/auth/file_source.go similarity index 100% rename from auth/token.go rename to auth/file_source.go From 1e2026d06b6288df38a9fd5b1938a6392c6b1fe5 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 01:09:05 +0100 Subject: [PATCH 132/195] Indent help --- handlers_meta.go | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/handlers_meta.go b/handlers_meta.go index 67d80ca9..eebdbce2 100644 --- a/handlers_meta.go +++ b/handlers_meta.go @@ -1,8 +1,10 @@ package main import ( + "os" "fmt" "strings" + "text/tabwriter" "./cli" ) @@ -11,11 +13,16 @@ func printVersion(ctx cli.Context) { } func printHelp(ctx cli.Context) { - fmt.Printf("%s usage:\n\n", Name) + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 0, 3, ' ', 0) + + fmt.Fprintf(w, "%s usage:\n\n", Name) for _, h := range ctx.Handlers() { - fmt.Printf("%s %s (%s)\n", Name, h.Pattern, h.Description) + fmt.Fprintf(w, "%s %s\t%s\n", Name, h.Pattern, h.Description) } + + w.Flush() } func printCommandHelp(ctx cli.Context) { @@ -35,18 +42,24 @@ func printCommandPrefixHelp(ctx cli.Context, prefix ...string) { ExitF("Command not found") } - fmt.Printf("%s %s (%s)\n", Name, handler.Pattern, handler.Description) + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 0, 3, ' ', 0) + + fmt.Fprintf(w, "%s\n", handler.Description) + fmt.Fprintf(w, "%s %s\n", Name, handler.Pattern) for _, group := range handler.FlagGroups { - fmt.Printf("\n%s:\n", group.Name) + fmt.Fprintf(w, "\n%s:\n", group.Name) for _, flag := range group.Flags { boolFlag, isBool := flag.(cli.BoolFlag) if isBool && boolFlag.OmitValue { - fmt.Printf(" %s (%s)\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription()) + fmt.Fprintf(w, " %s\t%s\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription()) } else { - fmt.Printf(" %s <%s> (%s)\n", strings.Join(flag.GetPatterns(), ", "), flag.GetName(), flag.GetDescription()) + fmt.Fprintf(w, " %s <%s>\t%s\n", strings.Join(flag.GetPatterns(), ", "), flag.GetName(), flag.GetDescription()) } } } + + w.Flush() } func getHandler(handlers []*cli.Handler, prefix []string) *cli.Handler { From 453384107e41991a5417c7c3ed78da6cb1c4a805 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 11:59:12 +0100 Subject: [PATCH 133/195] Support getting config dir from environment variable i.e. 
GDRIVE_CONFIG_DIR="/home/user/.gdrive" --- handlers_drive.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/handlers_drive.go b/handlers_drive.go index 3ca2c6ad..957161f9 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -323,11 +323,19 @@ func getOauthClient(args cli.Arguments) (*http.Client, error) { return auth.NewAccessTokenClient(ClientId, ClientSecret, args.String("accessToken")), nil } - configDir := args.String("configDir") + configDir := getConfigDir(args) tokenPath := ConfigFilePath(configDir, TokenFilename) return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt) } +func getConfigDir(args cli.Arguments) string { + // Use dir from environment var if present + if os.Getenv("GDRIVE_CONFIG_DIR") != "" { + return os.Getenv("GDRIVE_CONFIG_DIR") + } + return args.String("configDir") +} + func newDrive(args cli.Arguments) *drive.Drive { oauth, err := getOauthClient(args) if err != nil { From 428da4bcfedb3d3a2441d1d236f861e72ee8f5c6 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 12:55:09 +0100 Subject: [PATCH 134/195] Support downloading files by query --- drive/download.go | 41 +++++++++++++++++++++++++++++++++++++++++ gdrive.go | 33 +++++++++++++++++++++++++++++++++ handlers_drive.go | 13 +++++++++++++ 3 files changed, 87 insertions(+) diff --git a/drive/download.go b/drive/download.go index a33373fe..1779d575 100644 --- a/drive/download.go +++ b/drive/download.go @@ -58,6 +58,47 @@ func (self *Drive) Download(args DownloadArgs) error { return err } +type DownloadQueryArgs struct { + Out io.Writer + Progress io.Writer + Query string + Path string + Force bool + Recursive bool +} + +func (self *Drive) DownloadQuery(args DownloadQueryArgs) error { + listArgs := listAllFilesArgs{ + query: args.Query, + fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,size,md5Checksum)"}, + } + files, err := self.listAllFiles(listArgs) + if err != nil { + return fmt.Errorf("Failed to list files: %s", err) + } + + downloadArgs := DownloadArgs{ + Out: args.Out, + Progress: args.Progress, + Path: args.Path, + Force: args.Force, + } + + for _, f := range files { + if isDir(f) && args.Recursive { + err = self.downloadDirectory(f, downloadArgs) + } else if isBinary(f) { + _, _, err = self.downloadBinary(f, downloadArgs) + } + + if err != nil { + return err + } + } + + return nil +} + func (self *Drive) downloadRecursive(args DownloadArgs) error { f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do() if err != nil { diff --git a/gdrive.go b/gdrive.go index f89688b1..ffcf30ff 100644 --- a/gdrive.go +++ b/gdrive.go @@ -131,6 +131,39 @@ func main() { ), }, }, + &cli.Handler{ + Pattern: "[global] download query [options] ", + Description: "Download all files and directories matching query", + Callback: downloadQueryHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "force", + Patterns: []string{"-f", "--force"}, + Description: "Overwrite existing file", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "recursive", + Patterns: []string{"-r", "--recursive"}, + Description: "Download directories recursively, documents will be skipped", + OmitValue: true, + }, + cli.StringFlag{ + Name: "path", + Patterns: []string{"--path"}, + Description: "Download path", + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: 
true, + }, + ), + }, + }, &cli.Handler{ Pattern: "[global] upload [options] ", Description: "Upload file or directory", diff --git a/handlers_drive.go b/handlers_drive.go index 957161f9..3698d0ed 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -61,6 +61,19 @@ func downloadHandler(ctx cli.Context) { checkErr(err) } +func downloadQueryHandler(ctx cli.Context) { + args := ctx.Args() + err := newDrive(args).DownloadQuery(drive.DownloadQueryArgs{ + Out: os.Stdout, + Query: args.String("query"), + Force: args.Bool("force"), + Recursive: args.Bool("recursive"), + Path: args.String("path"), + Progress: progressWriter(args.Bool("noProgress")), + }) + checkErr(err) +} + func downloadSyncHandler(ctx cli.Context) { args := ctx.Args() cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName) From 4b95496643d36184ffa1a7b78ae2d1150ecdf0b6 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 14:46:27 +0100 Subject: [PATCH 135/195] Add sync property and s/isSyncRoot/syncRoot/ --- drive/sync_download.go | 2 +- drive/sync_list.go | 2 +- drive/sync_upload.go | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drive/sync_download.go b/drive/sync_download.go index 5016cc14..4d84eeaa 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -95,7 +95,7 @@ func (self *Drive) getSyncRoot(rootId string) (*drive.File, error) { } // Ensure directory is a proper syncRoot - if _, ok := f.AppProperties["isSyncRoot"]; !ok { + if _, ok := f.AppProperties["syncRoot"]; !ok { return nil, fmt.Errorf("Provided id is not a sync root directory") } diff --git a/drive/sync_list.go b/drive/sync_list.go index 6ded6062..e0352397 100644 --- a/drive/sync_list.go +++ b/drive/sync_list.go @@ -16,7 +16,7 @@ type ListSyncArgs struct { func (self *Drive) ListSync(args ListSyncArgs) error { listArgs := listAllFilesArgs{ - query: "appProperties has {key='isSyncRoot' and value='true'}", + query: "appProperties has {key='syncRoot' and value='true'}", fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,createdTime)"}, } files, err := self.listAllFiles(listArgs) diff --git a/drive/sync_upload.go b/drive/sync_upload.go index 7a0833a9..96442e11 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -106,7 +106,7 @@ func (self *Drive) prepareSyncRoot(args UploadSyncArgs) (*drive.File, error) { } // Return directory if syncRoot property is already set - if _, ok := f.AppProperties["isSyncRoot"]; ok { + if _, ok := f.AppProperties["syncRoot"]; ok { return f, nil } @@ -124,7 +124,7 @@ func (self *Drive) prepareSyncRoot(args UploadSyncArgs) (*drive.File, error) { // Update directory with syncRoot property dstFile := &drive.File{ - AppProperties: map[string]string{"isSyncRoot": "true"}, + AppProperties: map[string]string{"sync": "true", "syncRoot": "true"}, } f, err = self.service.Files.Update(f.Id, dstFile).Fields(fields...).Do() @@ -260,7 +260,7 @@ func (self *Drive) createMissingRemoteDir(args createMissingRemoteDirArgs) (*dri Name: args.name, MimeType: DirectoryMimeType, Parents: []string{args.parentId}, - AppProperties: map[string]string{"syncRootId": args.rootId}, + AppProperties: map[string]string{"sync": "true", "syncRootId": args.rootId}, } if args.dryRun { @@ -298,7 +298,7 @@ func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args Upload dstFile := &drive.File{ Name: lf.info.Name(), Parents: []string{parentId}, - AppProperties: map[string]string{"syncRootId": args.RootId}, + AppProperties: map[string]string{"sync": "true", 
"syncRootId": args.RootId}, } // Chunk size option From 2a3b8bd5d5aad1ace85d825cbddc2bf79db28e61 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 15:05:08 +0100 Subject: [PATCH 136/195] Prevent upload to sync dirs --- drive/sync.go | 10 ++++++++++ drive/upload.go | 12 ++++++++++++ 2 files changed, 22 insertions(+) diff --git a/drive/sync.go b/drive/sync.go index 204e7c23..2124f8fc 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -73,6 +73,16 @@ func (self *Drive) prepareSyncFiles(localPath string, root *drive.File, cmp File }, nil } +func (self *Drive) isSyncFile(id string) (bool, error) { + f, err := self.service.Files.Get(id).Fields("appProperties").Do() + if err != nil { + return false, fmt.Errorf("Failed to get file: %s", err) + } + + _, ok := f.AppProperties["sync"] + return ok, nil +} + func prepareLocalFiles(root string) ([]*LocalFile, error) { var files []*LocalFile diff --git a/drive/upload.go b/drive/upload.go index 2b8c7c36..0bbc0147 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -29,6 +29,18 @@ func (self *Drive) Upload(args UploadArgs) error { return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) } + // Ensure that none of the parents are sync dirs + for _, parent := range args.Parents { + isSyncDir, err := self.isSyncFile(parent) + if err != nil { + return err + } + + if isSyncDir { + return fmt.Errorf("%s is a sync directory, use 'sync upload' instead", parent) + } + } + if args.Recursive { return self.uploadRecursive(args) } From 0b0c7a47e8ed2fdb06858753508f5c4e0c2aa05b Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 15:08:45 +0100 Subject: [PATCH 137/195] s/list recursive/list content/ --- gdrive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gdrive.go b/gdrive.go index ffcf30ff..d0a8c095 100644 --- a/gdrive.go +++ b/gdrive.go @@ -413,7 +413,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] sync list recursive [options] ", + Pattern: "[global] sync list content [options] ", Description: "List content of syncable directory", Callback: listRecursiveSyncHandler, FlagGroups: cli.FlagGroups{ From a02adf6f342fb851ba9436d8c47c429d0f4b24c8 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 15:57:28 +0100 Subject: [PATCH 138/195] Add remotePathFinder --- drive/path.go | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 drive/path.go diff --git a/drive/path.go b/drive/path.go new file mode 100644 index 00000000..f5d1ad54 --- /dev/null +++ b/drive/path.go @@ -0,0 +1,65 @@ +package drive + +import ( + "fmt" + "path/filepath" + "google.golang.org/api/drive/v3" +) + +func (self *Drive) newPathfinder() *remotePathfinder { + return &remotePathfinder{ + service: self.service.Files, + files: make(map[string]*drive.File), + } +} + +type remotePathfinder struct { + service *drive.FilesService + files map[string]*drive.File +} + +func (self *remotePathfinder) absPath(f *drive.File) (string, error) { + name := f.Name + + if len(f.Parents) == 0 { + return name, nil + } + + var path []string + + for { + parent, err := self.getParent(f.Parents[0]) + if err != nil { + return "", err + } + + // Stop when we find the root dir + if len(parent.Parents) == 0 { + break + } + + path = append([]string{parent.Name}, path...) 
+ f = parent + } + + path = append(path, name) + return filepath.Join(path...), nil +} + +func (self *remotePathfinder) getParent(id string) (*drive.File, error) { + // Check cache + if f, ok := self.files[id]; ok { + return f, nil + } + + // Fetch file from drive + f, err := self.service.Get(id).Fields("id", "name", "parents").Do() + if err != nil { + return nil, fmt.Errorf("Failed to get file: %s", err) + } + + // Save in cache + self.files[f.Id] = f + + return f, nil +} From 9ee98bcb544c3c7da54e4674c219d459cf01a133 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 15:57:51 +0100 Subject: [PATCH 139/195] Add path to file info --- drive/info.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drive/info.go b/drive/info.go index d8383c39..aa190a82 100644 --- a/drive/info.go +++ b/drive/info.go @@ -12,24 +12,32 @@ type FileInfoArgs struct { SizeInBytes bool } -func (self *Drive) Info(args FileInfoArgs) (err error) { +func (self *Drive) Info(args FileInfoArgs) error { f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description", "webContentLink", "webViewLink").Do() if err != nil { return fmt.Errorf("Failed to get file: %s", err) } + pathfinder := self.newPathfinder() + absPath, err := pathfinder.absPath(f) + if err != nil { + return err + } + PrintFileInfo(PrintFileInfoArgs{ Out: args.Out, File: f, + Path: absPath, SizeInBytes: args.SizeInBytes, }) - return + return nil } type PrintFileInfoArgs struct { Out io.Writer File *drive.File + Path string SizeInBytes bool } @@ -39,6 +47,7 @@ func PrintFileInfo(args PrintFileInfoArgs) { items := []kv{ kv{"Id", f.Id}, kv{"Name", f.Name}, + kv{"Path", args.Path}, kv{"Description", f.Description}, kv{"Mime", f.MimeType}, kv{"Size", formatSize(f.Size, args.SizeInBytes)}, From f20a7f8125d3250eeea106860009b44dca9513dd Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 16:15:11 +0100 Subject: [PATCH 140/195] Add --absolute flag to show abs path to file --- drive/list.go | 15 ++++++++++++++- gdrive.go | 6 ++++++ handlers_drive.go | 1 + 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/drive/list.go b/drive/list.go index e6365852..73fdea51 100644 --- a/drive/list.go +++ b/drive/list.go @@ -17,12 +17,13 @@ type ListFilesArgs struct { SortOrder string SkipHeader bool SizeInBytes bool + AbsPath bool } func (self *Drive) List(args ListFilesArgs) (err error) { listArgs := listAllFilesArgs{ query: args.Query, - fields: []googleapi.Field{"nextPageToken", "files(id,name,md5Checksum,mimeType,size,createdTime)"}, + fields: []googleapi.Field{"nextPageToken", "files(id,name,md5Checksum,mimeType,size,createdTime,parents)"}, sortOrder: args.SortOrder, maxFiles: args.MaxFiles, } @@ -31,6 +32,18 @@ func (self *Drive) List(args ListFilesArgs) (err error) { return fmt.Errorf("Failed to list files: %s", err) } + pathfinder := self.newPathfinder() + + if args.AbsPath { + // Replace name with absolute path + for _, f := range files { + f.Name, err = pathfinder.absPath(f) + if err != nil { + return err + } + } + } + PrintFileList(PrintFileListArgs{ Out: args.Out, Files: files, diff --git a/gdrive.go b/gdrive.go index d0a8c095..d9181c63 100644 --- a/gdrive.go +++ b/gdrive.go @@ -71,6 +71,12 @@ func main() { Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), DefaultValue: DefaultNameWidth, }, + cli.BoolFlag{ + Name: "absPath", + 
Patterns: []string{"--absolute"}, + Description: "Show absolute path to file (will only show path from first parent)", + OmitValue: true, + }, cli.BoolFlag{ Name: "skipHeader", Patterns: []string{"--no-header"}, diff --git a/handlers_drive.go b/handlers_drive.go index 3698d0ed..05b1ca9c 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -28,6 +28,7 @@ func listHandler(ctx cli.Context) { SortOrder: args.String("sortOrder"), SkipHeader: args.Bool("skipHeader"), SizeInBytes: args.Bool("sizeInBytes"), + AbsPath: args.Bool("absPath"), }) checkErr(err) } From 2765f4ab467319a59224f078ec8ed8970a22025c Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 17:16:56 +0100 Subject: [PATCH 141/195] Add go version --- handlers_meta.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/handlers_meta.go b/handlers_meta.go index eebdbce2..1c20add2 100644 --- a/handlers_meta.go +++ b/handlers_meta.go @@ -4,12 +4,13 @@ import ( "os" "fmt" "strings" + "runtime" "text/tabwriter" "./cli" ) func printVersion(ctx cli.Context) { - fmt.Printf("%s v%s\n", Name, Version) + fmt.Printf("%s v%s, %s\n", Name, Version, runtime.Version()) } func printHelp(ctx cli.Context) { From a85c7245b3b6aefdcbf4cdf4e2ad9e87e4e5b30e Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 18:57:56 +0100 Subject: [PATCH 142/195] Add os/arch to version --- handlers_meta.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/handlers_meta.go b/handlers_meta.go index 1c20add2..52be7105 100644 --- a/handlers_meta.go +++ b/handlers_meta.go @@ -10,7 +10,9 @@ import ( ) func printVersion(ctx cli.Context) { - fmt.Printf("%s v%s, %s\n", Name, Version, runtime.Version()) + fmt.Printf("%s: %s\n", Name, Version) + fmt.Printf("Golang: %s\n", runtime.Version()) + fmt.Printf("OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH) } func printHelp(ctx cli.Context) { From f6d407618394e26889c61e6e57c90a7a0d81a3a1 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 20:29:13 +0100 Subject: [PATCH 143/195] Add script to print usage in markdown --- _release/print_usage_markdown.sh | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100755 _release/print_usage_markdown.sh diff --git a/_release/print_usage_markdown.sh b/_release/print_usage_markdown.sh new file mode 100755 index 00000000..e0cbc890 --- /dev/null +++ b/_release/print_usage_markdown.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +echo '## Usage' +echo '```' +gdrive help | tail -n+3 +echo '```' + +IFS=$'\n' + +help=$(gdrive help | grep global | sed -E 's/ \[[^]]+\]//g' | sed -E 's/ <[^>]+>//g' | sed -E 's/ {2,}.+//' | sed -E 's/^gdrive //') + +for args in $help; do + cmd="gdrive help $args" + echo + eval $cmd | sed -e '1s/^/#### /' | sed -e $'1s/$/\\\n```/' + echo '```' +done From 701c7f1991ae765a51b0b7404d1edbb2dc523055 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 20:32:54 +0100 Subject: [PATCH 144/195] Update build script --- _release/build-all.sh | 29 ++++----------- _release/crosscompile.bash | 73 -------------------------------------- 2 files changed, 7 insertions(+), 95 deletions(-) delete mode 100644 _release/crosscompile.bash diff --git a/_release/build-all.sh b/_release/build-all.sh index 414ae180..7f05036f 100755 --- a/_release/build-all.sh +++ b/_release/build-all.sh @@ -1,18 +1,15 @@ #!/bin/bash -# Load crosscompile environment -source _release/crosscompile.bash +APP_NAME="gdrive" +PLATFORMS="darwin/386 darwin/amd64 darwin/arm darwin/arm64 dragonfly/amd64 freebsd/386 
freebsd/amd64 freebsd/arm linux/386 linux/amd64 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/mips64 linux/mips64le netbsd/386 netbsd/amd64 netbsd/arm openbsd/386 openbsd/amd64 openbsd/arm plan9/386 plan9/amd64 solaris/amd64 windows/386 windows/amd64" -APP_NAME="drive" -PLATFORMS="darwin/386 darwin/amd64 freebsd/386 freebsd/amd64 linux/386 linux/amd64 linux/arm linux/rpi windows/386 windows/amd64" BIN_PATH="_release/bin" # Initialize bin dir mkdir -p $BIN_PATH -rm $BIN_PATH/* +rm $BIN_PATH/* 2> /dev/null - -# Build binary for each platform in parallel +# Build binary for each platform for PLATFORM in $PLATFORMS; do GOOS=${PLATFORM%/*} GOARCH=${PLATFORM#*/} @@ -22,23 +19,11 @@ for PLATFORM in $PLATFORMS; do BIN_NAME="${BIN_NAME}.exe" fi - # Raspberrypi seems to need arm5 binaries - if [ $GOARCH == "rpi" ]; then - export GOARM=5 - GOARCH="arm" - else - unset GOARM - fi - - BUILD_CMD="go-${GOOS}-${GOARCH} build -ldflags '-w' -o ${BIN_PATH}/${BIN_NAME} $APP_NAME.go" + export GOOS=$GOOS + export GOARCH=$GOARCH go build echo "Building $BIN_NAME" - $BUILD_CMD & -done - -# Wait for builds to complete -for job in $(jobs -p); do - wait $job + go build -ldflags '-w -s' -o ${BIN_PATH}/${BIN_NAME} done echo "All done" diff --git a/_release/crosscompile.bash b/_release/crosscompile.bash deleted file mode 100644 index ef385221..00000000 --- a/_release/crosscompile.bash +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash -# Copyright 2012 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# support functions for go cross compilation - -type setopt >/dev/null 2>&1 && setopt shwordsplit -PLATFORMS="darwin/386 darwin/amd64 freebsd/386 freebsd/amd64 freebsd/arm linux/386 linux/amd64 linux/arm windows/386 windows/amd64 openbsd/386 openbsd/amd64" - -function go-alias { - GOOS=${1%/*} - GOARCH=${1#*/} - eval "function go-${GOOS}-${GOARCH} { ( GOOS=${GOOS} GOARCH=${GOARCH} go \"\$@\" ) }" -} - -function go-crosscompile-build { - GOOS=${1%/*} - GOARCH=${1#*/} - cd $(go env GOROOT)/src ; GOOS=${GOOS} GOARCH=${GOARCH} ./make.bash --no-clean 2>&1 -} - -function go-crosscompile-build-all { - FAILURES="" - for PLATFORM in $PLATFORMS; do - CMD="go-crosscompile-build ${PLATFORM}" - echo "$CMD" - $CMD || FAILURES="$FAILURES $PLATFORM" - done - if [ "$FAILURES" != "" ]; then - echo "*** go-crosscompile-build-all FAILED on $FAILURES ***" - return 1 - fi -} - -function go-all { - FAILURES="" - for PLATFORM in $PLATFORMS; do - GOOS=${PLATFORM%/*} - GOARCH=${PLATFORM#*/} - CMD="go-${GOOS}-${GOARCH} $@" - echo "$CMD" - $CMD || FAILURES="$FAILURES $PLATFORM" - done - if [ "$FAILURES" != "" ]; then - echo "*** go-all FAILED on $FAILURES ***" - return 1 - fi -} - -function go-build-all { - FAILURES="" - for PLATFORM in $PLATFORMS; do - GOOS=${PLATFORM%/*} - GOARCH=${PLATFORM#*/} - SRCFILENAME=`echo $@ | sed 's/\.go//'` - CURDIRNAME=${PWD##*/} - OUTPUT=${SRCFILENAME:-$CURDIRNAME} # if no src file given, use current dir name - CMD="go-${GOOS}-${GOARCH} build -o $OUTPUT-${GOOS}-${GOARCH} $@" - echo "$CMD" - $CMD || FAILURES="$FAILURES $PLATFORM" - done - if [ "$FAILURES" != "" ]; then - echo "*** go-build-all FAILED on $FAILURES ***" - return 1 - fi -} - -for PLATFORM in $PLATFORMS; do - go-alias $PLATFORM -done - -unset -f go-alias From 1973512dd8edca24df4124fb3dfac4a432a0d481 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 21:03:26 +0100 Subject: [PATCH 145/195] go fmt --- auth/file_source.go | 85 +-- auth/oauth.go | 
106 +-- auth/util.go | 24 +- cli/context.go | 17 +- cli/flags.go | 180 +++-- cli/handler.go | 137 ++-- cli/parser.go | 386 +++++----- compare.go | 76 +- drive/about.go | 76 +- drive/changes.go | 158 ++-- drive/delete.go | 46 +- drive/download.go | 416 +++++----- drive/drive.go | 16 +- drive/errors.go | 18 +- drive/export.go | 172 ++--- drive/import.go | 80 +- drive/info.go | 94 +-- drive/list.go | 206 ++--- drive/mkdir.go | 42 +- drive/path.go | 80 +- drive/progress.go | 148 ++-- drive/revision_delete.go | 36 +- drive/revision_download.go | 98 +-- drive/revision_list.go | 92 +-- drive/share.go | 152 ++-- drive/sync.go | 836 ++++++++++---------- drive/sync_download.go | 558 +++++++------- drive/sync_list.go | 150 ++-- drive/sync_upload.go | 801 ++++++++++--------- drive/timeout_reader.go | 114 +-- drive/update.go | 106 +-- drive/upload.go | 402 +++++----- drive/util.go | 200 ++--- gdrive.go | 1486 ++++++++++++++++++------------------ handlers_drive.go | 607 ++++++++------- handlers_meta.go | 122 +-- util.go | 102 +-- 37 files changed, 4212 insertions(+), 4213 deletions(-) diff --git a/auth/file_source.go b/auth/file_source.go index 1c1150b4..52002033 100644 --- a/auth/file_source.go +++ b/auth/file_source.go @@ -1,68 +1,67 @@ package auth import ( - "golang.org/x/oauth2" - "encoding/json" - "os" - "io/ioutil" + "encoding/json" + "golang.org/x/oauth2" + "io/ioutil" + "os" ) - func FileSource(path string, token *oauth2.Token, conf *oauth2.Config) oauth2.TokenSource { - return &fileSource{ - tokenPath: path, - tokenSource: conf.TokenSource(oauth2.NoContext, token), - } + return &fileSource{ + tokenPath: path, + tokenSource: conf.TokenSource(oauth2.NoContext, token), + } } type fileSource struct { - tokenPath string - tokenSource oauth2.TokenSource + tokenPath string + tokenSource oauth2.TokenSource } func (self *fileSource) Token() (*oauth2.Token, error) { - token, err := self.tokenSource.Token() - if err != nil { - return token, err - } + token, err := self.tokenSource.Token() + if err != nil { + return token, err + } - // Save token to file - SaveToken(self.tokenPath, token) + // Save token to file + SaveToken(self.tokenPath, token) - return token, nil + return token, nil } func ReadToken(path string) (*oauth2.Token, bool, error) { - if !fileExists(path) { - return nil, false, nil - } + if !fileExists(path) { + return nil, false, nil + } - content, err := ioutil.ReadFile(path) - if err != nil { - return nil, true, err - } - token := &oauth2.Token{} - return token, true, json.Unmarshal(content, token) + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, true, err + } + token := &oauth2.Token{} + return token, true, json.Unmarshal(content, token) } func SaveToken(path string, token *oauth2.Token) error { - data, err := json.MarshalIndent(token, "", " ") - if err != nil { - return err - } + data, err := json.MarshalIndent(token, "", " ") + if err != nil { + return err + } - if err = mkdir(path); err != nil { - return err - } + if err = mkdir(path); err != nil { + return err + } - // Write to temp file first - tmpFile := path + ".tmp" - err = ioutil.WriteFile(tmpFile, data, 0600) - if err != nil { - os.Remove(tmpFile) - return err - } + // Write to temp file first + tmpFile := path + ".tmp" + err = ioutil.WriteFile(tmpFile, data, 0600) + if err != nil { + os.Remove(tmpFile) + return err + } - // Move file to correct path - return os.Rename(tmpFile, path) + // Move file to correct path + return os.Rename(tmpFile, path) } diff --git a/auth/oauth.go b/auth/oauth.go index 
965c7cc2..150642cc 100644 --- a/auth/oauth.go +++ b/auth/oauth.go @@ -1,78 +1,78 @@ package auth import ( - "fmt" - "time" - "net/http" - "golang.org/x/oauth2" + "fmt" + "golang.org/x/oauth2" + "net/http" + "time" ) type authCodeFn func(string) func() string func NewFileSourceClient(clientId, clientSecret, tokenFile string, authFn authCodeFn) (*http.Client, error) { - conf := getConfig(clientId, clientSecret) + conf := getConfig(clientId, clientSecret) - // Read cached token - token, exists, err := ReadToken(tokenFile) - if err != nil { - return nil, fmt.Errorf("Failed to read token: %s", err) - } + // Read cached token + token, exists, err := ReadToken(tokenFile) + if err != nil { + return nil, fmt.Errorf("Failed to read token: %s", err) + } - // Require auth code if token file does not exist - // or refresh token is missing - if !exists || token.RefreshToken == "" { - authUrl := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) - authCode := authFn(authUrl)() - token, err = conf.Exchange(oauth2.NoContext, authCode) - if err != nil { - return nil, fmt.Errorf("Failed to exchange auth code for token: %s", err) - } - } + // Require auth code if token file does not exist + // or refresh token is missing + if !exists || token.RefreshToken == "" { + authUrl := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) + authCode := authFn(authUrl)() + token, err = conf.Exchange(oauth2.NoContext, authCode) + if err != nil { + return nil, fmt.Errorf("Failed to exchange auth code for token: %s", err) + } + } - return oauth2.NewClient( - oauth2.NoContext, - FileSource(tokenFile, token, conf), - ), nil + return oauth2.NewClient( + oauth2.NoContext, + FileSource(tokenFile, token, conf), + ), nil } func NewRefreshTokenClient(clientId, clientSecret, refreshToken string) *http.Client { - conf := getConfig(clientId, clientSecret) + conf := getConfig(clientId, clientSecret) - token := &oauth2.Token{ - TokenType: "Bearer", - RefreshToken: refreshToken, - Expiry: time.Now(), - } + token := &oauth2.Token{ + TokenType: "Bearer", + RefreshToken: refreshToken, + Expiry: time.Now(), + } - return oauth2.NewClient( - oauth2.NoContext, - conf.TokenSource(oauth2.NoContext, token), - ) + return oauth2.NewClient( + oauth2.NoContext, + conf.TokenSource(oauth2.NoContext, token), + ) } func NewAccessTokenClient(clientId, clientSecret, accessToken string) *http.Client { - conf := getConfig(clientId, clientSecret) + conf := getConfig(clientId, clientSecret) - token := &oauth2.Token{ - TokenType: "Bearer", - AccessToken: accessToken, - } + token := &oauth2.Token{ + TokenType: "Bearer", + AccessToken: accessToken, + } - return oauth2.NewClient( - oauth2.NoContext, - conf.TokenSource(oauth2.NoContext, token), - ) + return oauth2.NewClient( + oauth2.NoContext, + conf.TokenSource(oauth2.NoContext, token), + ) } func getConfig(clientId, clientSecret string) *oauth2.Config { - return &oauth2.Config{ - ClientID: clientId, - ClientSecret: clientSecret, - Scopes: []string{"https://www.googleapis.com/auth/drive"}, - RedirectURL: "urn:ietf:wg:oauth:2.0:oob", - Endpoint: oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", - }, - } + return &oauth2.Config{ + ClientID: clientId, + ClientSecret: clientSecret, + Scopes: []string{"https://www.googleapis.com/auth/drive"}, + RedirectURL: "urn:ietf:wg:oauth:2.0:oob", + Endpoint: oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", + }, + } } 
diff --git a/auth/util.go b/auth/util.go index b053c1fd..dfa4adfc 100644 --- a/auth/util.go +++ b/auth/util.go @@ -1,22 +1,22 @@ package auth import ( - "os" - "path/filepath" + "os" + "path/filepath" ) func mkdir(path string) error { - dir := filepath.Dir(path) - if fileExists(dir) { - return nil - } - return os.Mkdir(dir, 0700) + dir := filepath.Dir(path) + if fileExists(dir) { + return nil + } + return os.Mkdir(dir, 0700) } func fileExists(path string) bool { - _, err := os.Stat(path) - if err == nil { - return true - } - return false + _, err := os.Stat(path) + if err == nil { + return true + } + return false } diff --git a/cli/context.go b/cli/context.go index ce82b175..cafb03fd 100644 --- a/cli/context.go +++ b/cli/context.go @@ -1,33 +1,32 @@ package cli - type Context struct { - args Arguments - handlers []*Handler + args Arguments + handlers []*Handler } func (self Context) Args() Arguments { - return self.args + return self.args } func (self Context) Handlers() []*Handler { - return self.handlers + return self.handlers } type Arguments map[string]interface{} func (self Arguments) String(key string) string { - return self[key].(string) + return self[key].(string) } func (self Arguments) Int64(key string) int64 { - return self[key].(int64) + return self[key].(int64) } func (self Arguments) Bool(key string) bool { - return self[key].(bool) + return self[key].(bool) } func (self Arguments) StringSlice(key string) []string { - return self[key].([]string) + return self[key].([]string) } diff --git a/cli/flags.go b/cli/flags.go index 6c82ed76..61ecfb40 100644 --- a/cli/flags.go +++ b/cli/flags.go @@ -1,162 +1,160 @@ package cli type Flag interface { - GetPatterns() []string - GetName() string - GetDescription() string - GetParser() Parser + GetPatterns() []string + GetName() string + GetDescription() string + GetParser() Parser } func getFlagParser(flags []Flag) Parser { - var parsers []Parser + var parsers []Parser - for _, flag := range flags { - parsers = append(parsers, flag.GetParser()) - } + for _, flag := range flags { + parsers = append(parsers, flag.GetParser()) + } - return FlagParser{parsers} + return FlagParser{parsers} } - type BoolFlag struct { - Patterns []string - Name string - Description string - DefaultValue bool - OmitValue bool + Patterns []string + Name string + Description string + DefaultValue bool + OmitValue bool } func (self BoolFlag) GetName() string { - return self.Name + return self.Name } func (self BoolFlag) GetPatterns() []string { - return self.Patterns + return self.Patterns } func (self BoolFlag) GetDescription() string { - return self.Description + return self.Description } func (self BoolFlag) GetParser() Parser { - var parsers []Parser - for _, p := range self.Patterns { - parsers = append(parsers, BoolFlagParser{ - pattern: p, - key: self.Name, - omitValue: self.OmitValue, - defaultValue: self.DefaultValue, - }) - } - - if len(parsers) == 1 { - return parsers[0] - } - return ShortCircuitParser{parsers} + var parsers []Parser + for _, p := range self.Patterns { + parsers = append(parsers, BoolFlagParser{ + pattern: p, + key: self.Name, + omitValue: self.OmitValue, + defaultValue: self.DefaultValue, + }) + } + + if len(parsers) == 1 { + return parsers[0] + } + return ShortCircuitParser{parsers} } - type StringFlag struct { - Patterns []string - Name string - Description string - DefaultValue string + Patterns []string + Name string + Description string + DefaultValue string } func (self StringFlag) GetName() string { - return self.Name + return 
self.Name } func (self StringFlag) GetPatterns() []string { - return self.Patterns + return self.Patterns } func (self StringFlag) GetDescription() string { - return self.Description + return self.Description } func (self StringFlag) GetParser() Parser { - var parsers []Parser - for _, p := range self.Patterns { - parsers = append(parsers, StringFlagParser{ - pattern: p, - key: self.Name, - defaultValue: self.DefaultValue, - }) - } + var parsers []Parser + for _, p := range self.Patterns { + parsers = append(parsers, StringFlagParser{ + pattern: p, + key: self.Name, + defaultValue: self.DefaultValue, + }) + } - if len(parsers) == 1 { - return parsers[0] - } - return ShortCircuitParser{parsers} + if len(parsers) == 1 { + return parsers[0] + } + return ShortCircuitParser{parsers} } type IntFlag struct { - Patterns []string - Name string - Description string - DefaultValue int64 + Patterns []string + Name string + Description string + DefaultValue int64 } func (self IntFlag) GetName() string { - return self.Name + return self.Name } func (self IntFlag) GetPatterns() []string { - return self.Patterns + return self.Patterns } func (self IntFlag) GetDescription() string { - return self.Description + return self.Description } func (self IntFlag) GetParser() Parser { - var parsers []Parser - for _, p := range self.Patterns { - parsers = append(parsers, IntFlagParser{ - pattern: p, - key: self.Name, - defaultValue: self.DefaultValue, - }) - } + var parsers []Parser + for _, p := range self.Patterns { + parsers = append(parsers, IntFlagParser{ + pattern: p, + key: self.Name, + defaultValue: self.DefaultValue, + }) + } - if len(parsers) == 1 { - return parsers[0] - } - return ShortCircuitParser{parsers} + if len(parsers) == 1 { + return parsers[0] + } + return ShortCircuitParser{parsers} } type StringSliceFlag struct { - Patterns []string - Name string - Description string - DefaultValue []string + Patterns []string + Name string + Description string + DefaultValue []string } func (self StringSliceFlag) GetName() string { - return self.Name + return self.Name } func (self StringSliceFlag) GetPatterns() []string { - return self.Patterns + return self.Patterns } func (self StringSliceFlag) GetDescription() string { - return self.Description + return self.Description } func (self StringSliceFlag) GetParser() Parser { - var parsers []Parser - for _, p := range self.Patterns { - parsers = append(parsers, StringSliceFlagParser{ - pattern: p, - key: self.Name, - defaultValue: self.DefaultValue, - }) - } - - if len(parsers) == 1 { - return parsers[0] - } - return ShortCircuitParser{parsers} + var parsers []Parser + for _, p := range self.Patterns { + parsers = append(parsers, StringSliceFlagParser{ + pattern: p, + key: self.Name, + defaultValue: self.DefaultValue, + }) + } + + if len(parsers) == 1 { + return parsers[0] + } + return ShortCircuitParser{parsers} } diff --git a/cli/handler.go b/cli/handler.go index a1a72576..3c53e7e8 100644 --- a/cli/handler.go +++ b/cli/handler.go @@ -1,119 +1,118 @@ package cli import ( - "regexp" - "strings" + "regexp" + "strings" ) -func NewFlagGroup(name string, flags...Flag) FlagGroup { - return FlagGroup{ - Name: name, - Flags: flags, - } +func NewFlagGroup(name string, flags ...Flag) FlagGroup { + return FlagGroup{ + Name: name, + Flags: flags, + } } type FlagGroup struct { - Name string - Flags []Flag + Name string + Flags []Flag } type FlagGroups []FlagGroup func (groups FlagGroups) getFlags(name string) []Flag { - for _, group := range groups { - if group.Name == name 
{ - return group.Flags - } - } + for _, group := range groups { + if group.Name == name { + return group.Flags + } + } - return nil + return nil } var handlers []*Handler type Handler struct { - Pattern string - FlagGroups FlagGroups - Callback func(Context) - Description string + Pattern string + FlagGroups FlagGroups + Callback func(Context) + Description string } func (self *Handler) getParser() Parser { - var parsers []Parser - - for _, pattern := range self.SplitPattern() { - if isFlagGroup(pattern) { - groupName := flagGroupName(pattern) - flags := self.FlagGroups.getFlags(groupName) - parsers = append(parsers, getFlagParser(flags)) - } else if isCaptureGroup(pattern) { - parsers = append(parsers, CaptureGroupParser{pattern}) - } else { - parsers = append(parsers, EqualParser{pattern}) - } - } - - return CompleteParser{parsers} + var parsers []Parser + + for _, pattern := range self.SplitPattern() { + if isFlagGroup(pattern) { + groupName := flagGroupName(pattern) + flags := self.FlagGroups.getFlags(groupName) + parsers = append(parsers, getFlagParser(flags)) + } else if isCaptureGroup(pattern) { + parsers = append(parsers, CaptureGroupParser{pattern}) + } else { + parsers = append(parsers, EqualParser{pattern}) + } + } + + return CompleteParser{parsers} } // Split on spaces but ignore spaces inside <...> and [...] func (self *Handler) SplitPattern() []string { - re := regexp.MustCompile(`(<[^>]+>|\[[^\]]+]|\S+)`) - matches := []string{} + re := regexp.MustCompile(`(<[^>]+>|\[[^\]]+]|\S+)`) + matches := []string{} - for _, value := range re.FindAllStringSubmatch(self.Pattern, -1) { - matches = append(matches, value[1]) - } + for _, value := range re.FindAllStringSubmatch(self.Pattern, -1) { + matches = append(matches, value[1]) + } - return matches + return matches } func SetHandlers(h []*Handler) { - handlers = h + handlers = h } func AddHandler(pattern string, groups FlagGroups, callback func(Context), desc string) { - handlers = append(handlers, &Handler{ - Pattern: pattern, - FlagGroups: groups, - Callback: callback, - Description: desc, - }) + handlers = append(handlers, &Handler{ + Pattern: pattern, + FlagGroups: groups, + Callback: callback, + Description: desc, + }) } func findHandler(args []string) *Handler { - for _, h := range handlers { - if _, ok := h.getParser().Match(args); ok { - return h - } - } - return nil + for _, h := range handlers { + if _, ok := h.getParser().Match(args); ok { + return h + } + } + return nil } - func Handle(args []string) bool { - h := findHandler(args) - if h == nil { - return false - } - - _, data := h.getParser().Capture(args) - ctx := Context{ - args: data, - handlers: handlers, - } - h.Callback(ctx) - return true + h := findHandler(args) + if h == nil { + return false + } + + _, data := h.getParser().Capture(args) + ctx := Context{ + args: data, + handlers: handlers, + } + h.Callback(ctx) + return true } func isCaptureGroup(arg string) bool { - return strings.HasPrefix(arg, "<") && strings.HasSuffix(arg, ">") + return strings.HasPrefix(arg, "<") && strings.HasSuffix(arg, ">") } func isFlagGroup(arg string) bool { - return strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") + return strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") } func flagGroupName(s string) string { - return s[1:len(s) - 1] + return s[1 : len(s)-1] } diff --git a/cli/parser.go b/cli/parser.go index 5fbbe3f5..e1b5bc13 100644 --- a/cli/parser.go +++ b/cli/parser.go @@ -1,357 +1,351 @@ package cli import ( - "fmt" - "strconv" + "fmt" + "strconv" ) type 
Parser interface { - Match([]string) ([]string, bool) - Capture([]string) ([]string, map[string]interface{}) + Match([]string) ([]string, bool) + Capture([]string) ([]string, map[string]interface{}) } type EqualParser struct { - value string + value string } func (self EqualParser) Match(values []string) ([]string, bool) { - if len(values) == 0 { - return values, false - } + if len(values) == 0 { + return values, false + } - if self.value == values[0] { - return values[1:], true - } + if self.value == values[0] { + return values[1:], true + } - return values, false + return values, false } func (self EqualParser) Capture(values []string) ([]string, map[string]interface{}) { - remainingValues, _ := self.Match(values) - return remainingValues, nil + remainingValues, _ := self.Match(values) + return remainingValues, nil } func (self EqualParser) String() string { - return fmt.Sprintf("EqualParser '%s'", self.value) + return fmt.Sprintf("EqualParser '%s'", self.value) } - type CaptureGroupParser struct { - value string + value string } func (self CaptureGroupParser) Match(values []string) ([]string, bool) { - if len(values) == 0 { - return values, false - } + if len(values) == 0 { + return values, false + } - return values[1:], true + return values[1:], true } func (self CaptureGroupParser) key() string { - return self.value[1:len(self.value) - 1] + return self.value[1 : len(self.value)-1] } func (self CaptureGroupParser) Capture(values []string) ([]string, map[string]interface{}) { - if remainingValues, ok := self.Match(values); ok { - return remainingValues, map[string]interface{}{self.key(): values[0]} - } + if remainingValues, ok := self.Match(values); ok { + return remainingValues, map[string]interface{}{self.key(): values[0]} + } - return values, nil + return values, nil } func (self CaptureGroupParser) String() string { - return fmt.Sprintf("CaptureGroupParser '%s'", self.value) + return fmt.Sprintf("CaptureGroupParser '%s'", self.value) } - - type BoolFlagParser struct { - pattern string - key string - omitValue bool - defaultValue bool + pattern string + key string + omitValue bool + defaultValue bool } func (self BoolFlagParser) Match(values []string) ([]string, bool) { - if self.omitValue { - return flagKeyMatch(self.pattern, values, 0) - } + if self.omitValue { + return flagKeyMatch(self.pattern, values, 0) + } - remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) - if !ok { - return remaining, false - } + remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) + if !ok { + return remaining, false + } - // Check that value is a valid boolean - if _, err := strconv.ParseBool(value); err != nil { - return remaining, false - } + // Check that value is a valid boolean + if _, err := strconv.ParseBool(value); err != nil { + return remaining, false + } - return remaining, true + return remaining, true } func (self BoolFlagParser) Capture(values []string) ([]string, map[string]interface{}) { - if self.omitValue { - remaining, ok := flagKeyMatch(self.pattern, values, 0) - return remaining, map[string]interface{}{self.key: ok} - } + if self.omitValue { + remaining, ok := flagKeyMatch(self.pattern, values, 0) + return remaining, map[string]interface{}{self.key: ok} + } - remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) - if !ok { - return remaining, map[string]interface{}{self.key: self.defaultValue} - } + remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) + if !ok { + return remaining, map[string]interface{}{self.key: 
self.defaultValue} + } - b, _ := strconv.ParseBool(value) - return remaining, map[string]interface{}{self.key: b} + b, _ := strconv.ParseBool(value) + return remaining, map[string]interface{}{self.key: b} } func (self BoolFlagParser) String() string { - return fmt.Sprintf("BoolFlagParser '%s'", self.pattern) + return fmt.Sprintf("BoolFlagParser '%s'", self.pattern) } type StringFlagParser struct { - pattern string - key string - defaultValue string + pattern string + key string + defaultValue string } func (self StringFlagParser) Match(values []string) ([]string, bool) { - remaining, _, ok := flagKeyValueMatch(self.pattern, values, 0) - return remaining, ok + remaining, _, ok := flagKeyValueMatch(self.pattern, values, 0) + return remaining, ok } func (self StringFlagParser) Capture(values []string) ([]string, map[string]interface{}) { - remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) - if !ok { - return remaining, map[string]interface{}{self.key: self.defaultValue} - } + remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) + if !ok { + return remaining, map[string]interface{}{self.key: self.defaultValue} + } - return remaining, map[string]interface{}{self.key: value} + return remaining, map[string]interface{}{self.key: value} } func (self StringFlagParser) String() string { - return fmt.Sprintf("StringFlagParser '%s'", self.pattern) + return fmt.Sprintf("StringFlagParser '%s'", self.pattern) } type IntFlagParser struct { - pattern string - key string - defaultValue int64 + pattern string + key string + defaultValue int64 } func (self IntFlagParser) Match(values []string) ([]string, bool) { - remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) - if !ok { - return remaining, false - } + remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) + if !ok { + return remaining, false + } - // Check that value is a valid integer - if _, err := strconv.ParseInt(value, 10, 64); err != nil { - return remaining, false - } + // Check that value is a valid integer + if _, err := strconv.ParseInt(value, 10, 64); err != nil { + return remaining, false + } - return remaining, true + return remaining, true } func (self IntFlagParser) Capture(values []string) ([]string, map[string]interface{}) { - remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) - if !ok { - return remaining, map[string]interface{}{self.key: self.defaultValue} - } + remaining, value, ok := flagKeyValueMatch(self.pattern, values, 0) + if !ok { + return remaining, map[string]interface{}{self.key: self.defaultValue} + } - n, _ := strconv.ParseInt(value, 10, 64) - return remaining, map[string]interface{}{self.key: n} + n, _ := strconv.ParseInt(value, 10, 64) + return remaining, map[string]interface{}{self.key: n} } func (self IntFlagParser) String() string { - return fmt.Sprintf("IntFlagParser '%s'", self.pattern) + return fmt.Sprintf("IntFlagParser '%s'", self.pattern) } - type StringSliceFlagParser struct { - pattern string - key string - defaultValue []string + pattern string + key string + defaultValue []string } func (self StringSliceFlagParser) Match(values []string) ([]string, bool) { - if len(values) < 2 { - return values, false - } + if len(values) < 2 { + return values, false + } - var remainingValues []string + var remainingValues []string - for i := 0; i < len(values); i++ { - if values[i] == self.pattern && i + 1 < len(values) { - i++ - continue - } - remainingValues = append(remainingValues, values[i]) - } + for i := 0; i < len(values); i++ { + if values[i] 
== self.pattern && i+1 < len(values) { + i++ + continue + } + remainingValues = append(remainingValues, values[i]) + } - return remainingValues, len(values) != len(remainingValues) + return remainingValues, len(values) != len(remainingValues) } func (self StringSliceFlagParser) Capture(values []string) ([]string, map[string]interface{}) { - remainingValues, ok := self.Match(values) - if !ok { - return values, map[string]interface{}{self.key: self.defaultValue} - } + remainingValues, ok := self.Match(values) + if !ok { + return values, map[string]interface{}{self.key: self.defaultValue} + } - var captured []string + var captured []string - for i := 0; i < len(values); i++ { - if values[i] == self.pattern && i + 1 < len(values) { - captured = append(captured, values[i + 1]) - } - } + for i := 0; i < len(values); i++ { + if values[i] == self.pattern && i+1 < len(values) { + captured = append(captured, values[i+1]) + } + } - return remainingValues, map[string]interface{}{self.key: captured} + return remainingValues, map[string]interface{}{self.key: captured} } func (self StringSliceFlagParser) String() string { - return fmt.Sprintf("StringSliceFlagParser '%s'", self.pattern) + return fmt.Sprintf("StringSliceFlagParser '%s'", self.pattern) } - type FlagParser struct { - parsers []Parser + parsers []Parser } func (self FlagParser) Match(values []string) ([]string, bool) { - remainingValues := values + remainingValues := values - for _, parser := range self.parsers { - remainingValues, _ = parser.Match(remainingValues) - } - return remainingValues, true + for _, parser := range self.parsers { + remainingValues, _ = parser.Match(remainingValues) + } + return remainingValues, true } func (self FlagParser) Capture(values []string) ([]string, map[string]interface{}) { - captured := map[string]interface{}{} - remainingValues := values + captured := map[string]interface{}{} + remainingValues := values - for _, parser := range self.parsers { - var data map[string]interface{} - remainingValues, data = parser.Capture(remainingValues) - for key, value := range data { - captured[key] = value - } - } + for _, parser := range self.parsers { + var data map[string]interface{} + remainingValues, data = parser.Capture(remainingValues) + for key, value := range data { + captured[key] = value + } + } - return remainingValues, captured + return remainingValues, captured } func (self FlagParser) String() string { - return fmt.Sprintf("FlagParser %v", self.parsers) + return fmt.Sprintf("FlagParser %v", self.parsers) } - type ShortCircuitParser struct { - parsers []Parser + parsers []Parser } func (self ShortCircuitParser) Match(values []string) ([]string, bool) { - remainingValues := values + remainingValues := values - for _, parser := range self.parsers { - var ok bool - remainingValues, ok = parser.Match(remainingValues) - if ok { - return remainingValues, true - } - } + for _, parser := range self.parsers { + var ok bool + remainingValues, ok = parser.Match(remainingValues) + if ok { + return remainingValues, true + } + } - return remainingValues, false + return remainingValues, false } func (self ShortCircuitParser) Capture(values []string) ([]string, map[string]interface{}) { - if len(self.parsers) == 0 { - return values, nil - } + if len(self.parsers) == 0 { + return values, nil + } - for _, parser := range self.parsers { - if _, ok := parser.Match(values); ok { - return parser.Capture(values) - } - } + for _, parser := range self.parsers { + if _, ok := parser.Match(values); ok { + return 
parser.Capture(values) + } + } - // No parsers matched at this point, - // just return the capture value of the first one - return self.parsers[0].Capture(values) + // No parsers matched at this point, + // just return the capture value of the first one + return self.parsers[0].Capture(values) } func (self ShortCircuitParser) String() string { - return fmt.Sprintf("ShortCircuitParser %v", self.parsers) + return fmt.Sprintf("ShortCircuitParser %v", self.parsers) } type CompleteParser struct { - parsers []Parser + parsers []Parser } func (self CompleteParser) Match(values []string) ([]string, bool) { - remainingValues := copySlice(values) + remainingValues := copySlice(values) - for _, parser := range self.parsers { - var ok bool - remainingValues, ok = parser.Match(remainingValues) - if !ok { - return remainingValues, false - } - } + for _, parser := range self.parsers { + var ok bool + remainingValues, ok = parser.Match(remainingValues) + if !ok { + return remainingValues, false + } + } - return remainingValues, len(remainingValues) == 0 + return remainingValues, len(remainingValues) == 0 } func (self CompleteParser) Capture(values []string) ([]string, map[string]interface{}) { - remainingValues := copySlice(values) - data := map[string]interface{}{} + remainingValues := copySlice(values) + data := map[string]interface{}{} - for _, parser := range self.parsers { - var captured map[string]interface{} - remainingValues, captured = parser.Capture(remainingValues) - for key, value := range captured { - data[key] = value - } - } + for _, parser := range self.parsers { + var captured map[string]interface{} + remainingValues, captured = parser.Capture(remainingValues) + for key, value := range captured { + data[key] = value + } + } - return remainingValues, data + return remainingValues, data } func (self CompleteParser) String() string { - return fmt.Sprintf("CompleteParser %v", self.parsers) + return fmt.Sprintf("CompleteParser %v", self.parsers) } func flagKeyValueMatch(key string, values []string, index int) ([]string, string, bool) { - if index > len(values) - 2 { - return values, "", false - } + if index > len(values)-2 { + return values, "", false + } - if values[index] == key { - value := values[index + 1] - remaining := append(copySlice(values[:index]), values[index + 2:]...) - return remaining, value, true - } + if values[index] == key { + value := values[index+1] + remaining := append(copySlice(values[:index]), values[index+2:]...) + return remaining, value, true + } - return flagKeyValueMatch(key, values, index + 1) + return flagKeyValueMatch(key, values, index+1) } func flagKeyMatch(key string, values []string, index int) ([]string, bool) { - if index > len(values) - 1 { - return values, false - } + if index > len(values)-1 { + return values, false + } - if values[index] == key { - remaining := append(copySlice(values[:index]), values[index + 1:]...) - return remaining, true - } + if values[index] == key { + remaining := append(copySlice(values[:index]), values[index+1:]...) 
+ return remaining, true + } - return flagKeyMatch(key, values, index + 1) + return flagKeyMatch(key, values, index+1) } func copySlice(a []string) []string { - b := make([]string, len(a)) - copy(b, a) - return b + b := make([]string, len(a)) + copy(b, a) + return b } diff --git a/compare.go b/compare.go index 10cab3c0..7dd8c86f 100644 --- a/compare.go +++ b/compare.go @@ -1,74 +1,74 @@ package main import ( - "os" - "encoding/json" "./drive" + "encoding/json" + "os" ) const MinCacheFileSize = 5 * 1024 * 1024 -type Md5Comparer struct {} +type Md5Comparer struct{} func (self Md5Comparer) Changed(local *drive.LocalFile, remote *drive.RemoteFile) bool { - return remote.Md5() != md5sum(local.AbsPath()) + return remote.Md5() != md5sum(local.AbsPath()) } type CachedFileInfo struct { - Size int64 `json:"size"` - Modified int64 `json:"modified"` - Md5 string `json:"md5"` + Size int64 `json:"size"` + Modified int64 `json:"modified"` + Md5 string `json:"md5"` } func NewCachedMd5Comparer(path string) CachedMd5Comparer { - cache := map[string]*CachedFileInfo{} - - f, err := os.Open(path) - if err == nil { - json.NewDecoder(f).Decode(&cache) - } - f.Close() - return CachedMd5Comparer{path, cache} + cache := map[string]*CachedFileInfo{} + + f, err := os.Open(path) + if err == nil { + json.NewDecoder(f).Decode(&cache) + } + f.Close() + return CachedMd5Comparer{path, cache} } type CachedMd5Comparer struct { - path string - cache map[string]*CachedFileInfo + path string + cache map[string]*CachedFileInfo } func (self CachedMd5Comparer) Changed(local *drive.LocalFile, remote *drive.RemoteFile) bool { - return remote.Md5() != self.md5(local) + return remote.Md5() != self.md5(local) } func (self CachedMd5Comparer) md5(local *drive.LocalFile) string { - // See if file exist in cache - cached, found := self.cache[local.AbsPath()] + // See if file exist in cache + cached, found := self.cache[local.AbsPath()] - // If found and modification time and size has not changed, return cached md5 - if found && local.Modified().UnixNano() == cached.Modified && local.Size() == cached.Size { - return cached.Md5 - } + // If found and modification time and size has not changed, return cached md5 + if found && local.Modified().UnixNano() == cached.Modified && local.Size() == cached.Size { + return cached.Md5 + } - // Calculate new md5 sum - md5 := md5sum(local.AbsPath()) + // Calculate new md5 sum + md5 := md5sum(local.AbsPath()) - // Cache file info if file meets size criteria - if local.Size() > MinCacheFileSize { - self.cacheAdd(local, md5) - self.persist() - } + // Cache file info if file meets size criteria + if local.Size() > MinCacheFileSize { + self.cacheAdd(local, md5) + self.persist() + } - return md5 + return md5 } func (self CachedMd5Comparer) cacheAdd(lf *drive.LocalFile, md5 string) { - self.cache[lf.AbsPath()] = &CachedFileInfo{ - Size: lf.Size(), - Modified: lf.Modified().UnixNano(), - Md5: md5, - } + self.cache[lf.AbsPath()] = &CachedFileInfo{ + Size: lf.Size(), + Modified: lf.Modified().UnixNano(), + Md5: md5, + } } func (self CachedMd5Comparer) persist() { - writeJson(self.path, self.cache) + writeJson(self.path, self.cache) } diff --git a/drive/about.go b/drive/about.go index 4c23ab88..c2f1643d 100644 --- a/drive/about.go +++ b/drive/about.go @@ -1,68 +1,68 @@ package drive import ( - "io" - "fmt" - "text/tabwriter" + "fmt" + "io" + "text/tabwriter" ) type AboutArgs struct { - Out io.Writer - SizeInBytes bool + Out io.Writer + SizeInBytes bool } func (self *Drive) About(args AboutArgs) (err error) { - about, 
err := self.service.About.Get().Fields("maxImportSizes", "maxUploadSize", "storageQuota", "user").Do() - if err != nil { - return fmt.Errorf("Failed to get about: %s", err) - } + about, err := self.service.About.Get().Fields("maxImportSizes", "maxUploadSize", "storageQuota", "user").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } - user := about.User - quota := about.StorageQuota + user := about.User + quota := about.StorageQuota - fmt.Fprintf(args.Out, "User: %s, %s\n", user.DisplayName, user.EmailAddress) - fmt.Fprintf(args.Out, "Used: %s\n", formatSize(quota.Usage, args.SizeInBytes)) - fmt.Fprintf(args.Out, "Free: %s\n", formatSize(quota.Limit - quota.Usage, args.SizeInBytes)) - fmt.Fprintf(args.Out, "Total: %s\n", formatSize(quota.Limit, args.SizeInBytes)) - fmt.Fprintf(args.Out, "Max upload size: %s\n", formatSize(about.MaxUploadSize, args.SizeInBytes)) - return + fmt.Fprintf(args.Out, "User: %s, %s\n", user.DisplayName, user.EmailAddress) + fmt.Fprintf(args.Out, "Used: %s\n", formatSize(quota.Usage, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Free: %s\n", formatSize(quota.Limit-quota.Usage, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Total: %s\n", formatSize(quota.Limit, args.SizeInBytes)) + fmt.Fprintf(args.Out, "Max upload size: %s\n", formatSize(about.MaxUploadSize, args.SizeInBytes)) + return } type AboutImportArgs struct { - Out io.Writer + Out io.Writer } func (self *Drive) AboutImport(args AboutImportArgs) (err error) { - about, err := self.service.About.Get().Fields("importFormats").Do() - if err != nil { - return fmt.Errorf("Failed to get about: %s", err) - } - printAboutFormats(args.Out, about.ImportFormats) - return + about, err := self.service.About.Get().Fields("importFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + printAboutFormats(args.Out, about.ImportFormats) + return } type AboutExportArgs struct { - Out io.Writer + Out io.Writer } func (self *Drive) AboutExport(args AboutExportArgs) (err error) { - about, err := self.service.About.Get().Fields("exportFormats").Do() - if err != nil { - return fmt.Errorf("Failed to get about: %s", err) - } - printAboutFormats(args.Out, about.ExportFormats) - return + about, err := self.service.About.Get().Fields("exportFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + printAboutFormats(args.Out, about.ExportFormats) + return } func printAboutFormats(out io.Writer, formats map[string][]string) { - w := new(tabwriter.Writer) - w.Init(out, 0, 0, 3, ' ', 0) + w := new(tabwriter.Writer) + w.Init(out, 0, 0, 3, ' ', 0) - fmt.Fprintln(w, "From\tTo") + fmt.Fprintln(w, "From\tTo") - for from, toFormats := range formats { - fmt.Fprintf(w, "%s\t%s\n", from, formatList(toFormats)) - } + for from, toFormats := range formats { + fmt.Fprintf(w, "%s\t%s\n", from, formatList(toFormats)) + } - w.Flush() + w.Flush() } diff --git a/drive/changes.go b/drive/changes.go index 1d9a89dc..ffd78242 100644 --- a/drive/changes.go +++ b/drive/changes.go @@ -1,103 +1,103 @@ package drive import ( - "fmt" - "io" - "text/tabwriter" - "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "io" + "text/tabwriter" ) type ListChangesArgs struct { - Out io.Writer - PageToken string - MaxChanges int64 - Now bool - NameWidth int64 - SkipHeader bool + Out io.Writer + PageToken string + MaxChanges int64 + Now bool + NameWidth int64 + SkipHeader bool } func (self *Drive) ListChanges(args ListChangesArgs) error { - if args.Now { - pageToken, 
err := self.GetChangesStartPageToken() - if err != nil { - return err - } - - fmt.Fprintf(args.Out, "Page token: %s\n", pageToken) - return nil - } - - changeList, err := self.service.Changes.List(args.PageToken).PageSize(args.MaxChanges).RestrictToMyDrive(true).Fields("newStartPageToken", "nextPageToken", "changes(fileId,removed,time,file(id,name,md5Checksum,mimeType,createdTime,modifiedTime))").Do() - if err != nil { - return fmt.Errorf("Failed listing changes: %s", err) - } - - PrintChanges(PrintChangesArgs{ - Out: args.Out, - ChangeList: changeList, - NameWidth: int(args.NameWidth), - SkipHeader: args.SkipHeader, - }) - - return nil + if args.Now { + pageToken, err := self.GetChangesStartPageToken() + if err != nil { + return err + } + + fmt.Fprintf(args.Out, "Page token: %s\n", pageToken) + return nil + } + + changeList, err := self.service.Changes.List(args.PageToken).PageSize(args.MaxChanges).RestrictToMyDrive(true).Fields("newStartPageToken", "nextPageToken", "changes(fileId,removed,time,file(id,name,md5Checksum,mimeType,createdTime,modifiedTime))").Do() + if err != nil { + return fmt.Errorf("Failed listing changes: %s", err) + } + + PrintChanges(PrintChangesArgs{ + Out: args.Out, + ChangeList: changeList, + NameWidth: int(args.NameWidth), + SkipHeader: args.SkipHeader, + }) + + return nil } func (self *Drive) GetChangesStartPageToken() (string, error) { - res, err := self.service.Changes.GetStartPageToken().Do() - if err != nil { - return "", fmt.Errorf("Failed getting start page token: %s", err) - } + res, err := self.service.Changes.GetStartPageToken().Do() + if err != nil { + return "", fmt.Errorf("Failed getting start page token: %s", err) + } - return res.StartPageToken, nil + return res.StartPageToken, nil } type PrintChangesArgs struct { - Out io.Writer - ChangeList *drive.ChangeList - NameWidth int - SkipHeader bool + Out io.Writer + ChangeList *drive.ChangeList + NameWidth int + SkipHeader bool } func PrintChanges(args PrintChangesArgs) { - w := new(tabwriter.Writer) - w.Init(args.Out, 0, 0, 3, ' ', 0) - - if !args.SkipHeader { - fmt.Fprintln(w, "Id\tName\tAction\tTime") - } - - for _, c := range args.ChangeList.Changes { - var name string - var action string - - if c.Removed { - action = "remove" - } else { - name = c.File.Name - action = "update" - } - - fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", - c.FileId, - truncateString(name, args.NameWidth), - action, - formatDatetime(c.Time), - ) - } - - if len(args.ChangeList.Changes) > 0 { - w.Flush() - pageToken, hasMore := nextChangesPageToken(args.ChangeList) - fmt.Fprintf(args.Out, "\nToken: %s, more: %t\n", pageToken, hasMore) - } else { - fmt.Fprintln(args.Out, "No changes") - } + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tAction\tTime") + } + + for _, c := range args.ChangeList.Changes { + var name string + var action string + + if c.Removed { + action = "remove" + } else { + name = c.File.Name + action = "update" + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", + c.FileId, + truncateString(name, args.NameWidth), + action, + formatDatetime(c.Time), + ) + } + + if len(args.ChangeList.Changes) > 0 { + w.Flush() + pageToken, hasMore := nextChangesPageToken(args.ChangeList) + fmt.Fprintf(args.Out, "\nToken: %s, more: %t\n", pageToken, hasMore) + } else { + fmt.Fprintln(args.Out, "No changes") + } } func nextChangesPageToken(cl *drive.ChangeList) (string, bool) { - if cl.NextPageToken != "" { - return cl.NextPageToken, true - } + if cl.NextPageToken != "" { + return 
cl.NextPageToken, true + } - return cl.NewStartPageToken, false + return cl.NewStartPageToken, false } diff --git a/drive/delete.go b/drive/delete.go index bacd4a34..314672ce 100644 --- a/drive/delete.go +++ b/drive/delete.go @@ -1,39 +1,39 @@ package drive import ( - "io" - "fmt" + "fmt" + "io" ) type DeleteArgs struct { - Out io.Writer - Id string - Recursive bool + Out io.Writer + Id string + Recursive bool } func (self *Drive) Delete(args DeleteArgs) error { - f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } + f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } - if isDir(f) && !args.Recursive { - return fmt.Errorf("'%s' is a directory, use the 'recursive' flag to delete directories", f.Name) - } + if isDir(f) && !args.Recursive { + return fmt.Errorf("'%s' is a directory, use the 'recursive' flag to delete directories", f.Name) + } - err = self.service.Files.Delete(args.Id).Do() - if err != nil { - return fmt.Errorf("Failed to delete file: %s", err) - } + err = self.service.Files.Delete(args.Id).Do() + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } - fmt.Fprintf(args.Out, "Deleted '%s'\n", f.Name) - return nil + fmt.Fprintf(args.Out, "Deleted '%s'\n", f.Name) + return nil } func (self *Drive) deleteFile(fileId string) error { - err := self.service.Files.Delete(fileId).Do() - if err != nil { - return fmt.Errorf("Failed to delete file: %s", err) - } - return nil + err := self.service.Files.Delete(fileId).Do() + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } + return nil } diff --git a/drive/download.go b/drive/download.go index 1779d575..15495dfb 100644 --- a/drive/download.go +++ b/drive/download.go @@ -1,245 +1,245 @@ package drive import ( - "fmt" - "io" - "os" - "time" - "path/filepath" - "google.golang.org/api/drive/v3" - "google.golang.org/api/googleapi" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "os" + "path/filepath" + "time" ) type DownloadArgs struct { - Out io.Writer - Progress io.Writer - Id string - Path string - Force bool - Recursive bool - Delete bool - Stdout bool + Out io.Writer + Progress io.Writer + Id string + Path string + Force bool + Recursive bool + Delete bool + Stdout bool } func (self *Drive) Download(args DownloadArgs) error { - if args.Recursive { - return self.downloadRecursive(args) - } - - f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } - - if isDir(f) { - return fmt.Errorf("'%s' is a directory, use --recursive to download directories", f.Name) - } - - if !isBinary(f) { - return fmt.Errorf("'%s' is a google document and must be exported, see the export command", f.Name) - } - - bytes, rate, err := self.downloadBinary(f, args) - - if !args.Stdout { - fmt.Fprintf(args.Out, "Downloaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(bytes, false)) - } - - if args.Delete { - err = self.deleteFile(args.Id) - if err != nil { - return fmt.Errorf("Failed to delete file: %s", err) - } - - if !args.Stdout { - fmt.Fprintf(args.Out, "Removed %s\n", args.Id) - } - } - return err + if args.Recursive { + return self.downloadRecursive(args) + } + + f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", 
"md5Checksum").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } + + if isDir(f) { + return fmt.Errorf("'%s' is a directory, use --recursive to download directories", f.Name) + } + + if !isBinary(f) { + return fmt.Errorf("'%s' is a google document and must be exported, see the export command", f.Name) + } + + bytes, rate, err := self.downloadBinary(f, args) + + if !args.Stdout { + fmt.Fprintf(args.Out, "Downloaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(bytes, false)) + } + + if args.Delete { + err = self.deleteFile(args.Id) + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } + + if !args.Stdout { + fmt.Fprintf(args.Out, "Removed %s\n", args.Id) + } + } + return err } type DownloadQueryArgs struct { - Out io.Writer - Progress io.Writer - Query string - Path string - Force bool - Recursive bool + Out io.Writer + Progress io.Writer + Query string + Path string + Force bool + Recursive bool } func (self *Drive) DownloadQuery(args DownloadQueryArgs) error { - listArgs := listAllFilesArgs{ - query: args.Query, - fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,size,md5Checksum)"}, - } - files, err := self.listAllFiles(listArgs) - if err != nil { - return fmt.Errorf("Failed to list files: %s", err) - } - - downloadArgs := DownloadArgs{ - Out: args.Out, - Progress: args.Progress, - Path: args.Path, - Force: args.Force, - } - - for _, f := range files { - if isDir(f) && args.Recursive { - err = self.downloadDirectory(f, downloadArgs) - } else if isBinary(f) { - _, _, err = self.downloadBinary(f, downloadArgs) - } - - if err != nil { - return err - } - } - - return nil + listArgs := listAllFilesArgs{ + query: args.Query, + fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,size,md5Checksum)"}, + } + files, err := self.listAllFiles(listArgs) + if err != nil { + return fmt.Errorf("Failed to list files: %s", err) + } + + downloadArgs := DownloadArgs{ + Out: args.Out, + Progress: args.Progress, + Path: args.Path, + Force: args.Force, + } + + for _, f := range files { + if isDir(f) && args.Recursive { + err = self.downloadDirectory(f, downloadArgs) + } else if isBinary(f) { + _, _, err = self.downloadBinary(f, downloadArgs) + } + + if err != nil { + return err + } + } + + return nil } func (self *Drive) downloadRecursive(args DownloadArgs) error { - f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } - - if isDir(f) { - return self.downloadDirectory(f, args) - } else if isBinary(f) { - _, _, err = self.downloadBinary(f, args) - return err - } - - return nil + f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "mimeType", "md5Checksum").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } + + if isDir(f) { + return self.downloadDirectory(f, args) + } else if isBinary(f) { + _, _, err = self.downloadBinary(f, args) + return err + } + + return nil } func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) (int64, int64, error) { - // Get timeout reader wrapper and context - timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() - - res, err := self.service.Files.Get(f.Id).Context(ctx).Download() - if err != nil { - return 0, 0, fmt.Errorf("Failed to download file: %s", err) - } - - // Close body on function exit - defer res.Body.Close() - - // Path to file - fpath := filepath.Join(args.Path, f.Name) - - if 
!args.Stdout { - fmt.Fprintf(args.Out, "Downloading %s -> %s\n", f.Name, fpath) - } - - return self.saveFile(saveFileArgs{ - out: args.Out, - body: timeoutReaderWrapper(res.Body), - contentLength: res.ContentLength, - fpath: fpath, - force: args.Force, - stdout: args.Stdout, - progress: args.Progress, - }) + // Get timeout reader wrapper and context + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + + res, err := self.service.Files.Get(f.Id).Context(ctx).Download() + if err != nil { + return 0, 0, fmt.Errorf("Failed to download file: %s", err) + } + + // Close body on function exit + defer res.Body.Close() + + // Path to file + fpath := filepath.Join(args.Path, f.Name) + + if !args.Stdout { + fmt.Fprintf(args.Out, "Downloading %s -> %s\n", f.Name, fpath) + } + + return self.saveFile(saveFileArgs{ + out: args.Out, + body: timeoutReaderWrapper(res.Body), + contentLength: res.ContentLength, + fpath: fpath, + force: args.Force, + stdout: args.Stdout, + progress: args.Progress, + }) } type saveFileArgs struct { - out io.Writer - body io.Reader - contentLength int64 - fpath string - force bool - stdout bool - progress io.Writer + out io.Writer + body io.Reader + contentLength int64 + fpath string + force bool + stdout bool + progress io.Writer } func (self *Drive) saveFile(args saveFileArgs) (int64, int64, error) { - // Wrap response body in progress reader - srcReader := getProgressReader(args.body, args.progress, args.contentLength) - - if args.stdout { - // Write file content to stdout - _, err := io.Copy(args.out, srcReader) - return 0, 0, err - } - - // Check if file exists - if !args.force && fileExists(args.fpath) { - return 0, 0, fmt.Errorf("File '%s' already exists, use --force to overwrite", args.fpath) - } - - // Ensure any parent directories exists - if err := mkdir(args.fpath); err != nil { - return 0, 0, err - } - - // Download to tmp file - tmpPath := args.fpath + ".incomplete" - - // Create new file - outFile, err := os.Create(tmpPath) - if err != nil { - return 0, 0, fmt.Errorf("Unable to create new file: %s", err) - } - - started := time.Now() - - // Save file to disk - bytes, err := io.Copy(outFile, srcReader) - if err != nil { - outFile.Close() - os.Remove(tmpPath) - return 0, 0, fmt.Errorf("Failed saving file: %s", err) - } - - // Calculate average download rate - rate := calcRate(bytes, started, time.Now()) - - //if deleteSourceFile { - // self.Delete(args.Id) - //} - - // Close File - outFile.Close() - - // Rename tmp file to proper filename - return bytes, rate, os.Rename(tmpPath, args.fpath) + // Wrap response body in progress reader + srcReader := getProgressReader(args.body, args.progress, args.contentLength) + + if args.stdout { + // Write file content to stdout + _, err := io.Copy(args.out, srcReader) + return 0, 0, err + } + + // Check if file exists + if !args.force && fileExists(args.fpath) { + return 0, 0, fmt.Errorf("File '%s' already exists, use --force to overwrite", args.fpath) + } + + // Ensure any parent directories exists + if err := mkdir(args.fpath); err != nil { + return 0, 0, err + } + + // Download to tmp file + tmpPath := args.fpath + ".incomplete" + + // Create new file + outFile, err := os.Create(tmpPath) + if err != nil { + return 0, 0, fmt.Errorf("Unable to create new file: %s", err) + } + + started := time.Now() + + // Save file to disk + bytes, err := io.Copy(outFile, srcReader) + if err != nil { + outFile.Close() + os.Remove(tmpPath) + return 0, 0, fmt.Errorf("Failed saving file: %s", err) + } + + // Calculate average download 
rate + rate := calcRate(bytes, started, time.Now()) + + //if deleteSourceFile { + // self.Delete(args.Id) + //} + + // Close File + outFile.Close() + + // Rename tmp file to proper filename + return bytes, rate, os.Rename(tmpPath, args.fpath) } func (self *Drive) downloadDirectory(parent *drive.File, args DownloadArgs) error { - listArgs := listAllFilesArgs{ - query: fmt.Sprintf("'%s' in parents", parent.Id), - fields: []googleapi.Field{"nextPageToken", "files(id,name)"}, - } - files, err := self.listAllFiles(listArgs) - if err != nil { - return fmt.Errorf("Failed listing files: %s", err) - } - - newPath := filepath.Join(args.Path, parent.Name) - - for _, f := range files { - // Copy args and update changed fields - newArgs := args - newArgs.Path = newPath - newArgs.Id = f.Id - newArgs.Stdout = false - - err = self.downloadRecursive(newArgs) - if err != nil { - return err - } - } - - return nil + listArgs := listAllFilesArgs{ + query: fmt.Sprintf("'%s' in parents", parent.Id), + fields: []googleapi.Field{"nextPageToken", "files(id,name)"}, + } + files, err := self.listAllFiles(listArgs) + if err != nil { + return fmt.Errorf("Failed listing files: %s", err) + } + + newPath := filepath.Join(args.Path, parent.Name) + + for _, f := range files { + // Copy args and update changed fields + newArgs := args + newArgs.Path = newPath + newArgs.Id = f.Id + newArgs.Stdout = false + + err = self.downloadRecursive(newArgs) + if err != nil { + return err + } + } + + return nil } func isDir(f *drive.File) bool { - return f.MimeType == DirectoryMimeType + return f.MimeType == DirectoryMimeType } func isBinary(f *drive.File) bool { - return f.Md5Checksum != "" + return f.Md5Checksum != "" } diff --git a/drive/drive.go b/drive/drive.go index d908beb2..696f5d55 100644 --- a/drive/drive.go +++ b/drive/drive.go @@ -1,19 +1,19 @@ package drive import ( - "net/http" - "google.golang.org/api/drive/v3" + "google.golang.org/api/drive/v3" + "net/http" ) type Drive struct { - service *drive.Service + service *drive.Service } func New(client *http.Client) (*Drive, error) { - service, err := drive.New(client) - if err != nil { - return nil, err - } + service, err := drive.New(client) + if err != nil { + return nil, err + } - return &Drive{service}, nil + return &Drive{service}, nil } diff --git a/drive/errors.go b/drive/errors.go index 703dae53..e7631f77 100644 --- a/drive/errors.go +++ b/drive/errors.go @@ -1,22 +1,22 @@ package drive import ( - "google.golang.org/api/googleapi" - "time" + "google.golang.org/api/googleapi" + "time" ) const MaxBackendErrorRetries = 5 func isBackendError(err error) bool { - if err == nil { - return false - } + if err == nil { + return false + } - ae, ok := err.(*googleapi.Error) - return ok && ae.Code >= 500 && ae.Code <= 599 + ae, ok := err.(*googleapi.Error) + return ok && ae.Code >= 500 && ae.Code <= 599 } func exponentialBackoffSleep(try int) { - seconds := pow(2, try) - time.Sleep(time.Duration(seconds) * time.Second) + seconds := pow(2, try) + time.Sleep(time.Duration(seconds) * time.Second) } diff --git a/drive/export.go b/drive/export.go index c90bc100..3fdd45a6 100644 --- a/drive/export.go +++ b/drive/export.go @@ -1,111 +1,111 @@ package drive import ( - "io" - "os" - "fmt" - "mime" + "fmt" + "io" + "mime" + "os" ) var DefaultExportMime = map[string]string{ - "application/vnd.google-apps.form": "application/zip", - "application/vnd.google-apps.document": "application/pdf", - "application/vnd.google-apps.drawing": "image/svg+xml", - "application/vnd.google-apps.spreadsheet": 
"text/csv", - "application/vnd.google-apps.script": "application/vnd.google-apps.script+json", - "application/vnd.google-apps.presentation": "application/pdf", + "application/vnd.google-apps.form": "application/zip", + "application/vnd.google-apps.document": "application/pdf", + "application/vnd.google-apps.drawing": "image/svg+xml", + "application/vnd.google-apps.spreadsheet": "text/csv", + "application/vnd.google-apps.script": "application/vnd.google-apps.script+json", + "application/vnd.google-apps.presentation": "application/pdf", } type ExportArgs struct { - Out io.Writer - Id string - PrintMimes bool - Mime string - Force bool + Out io.Writer + Id string + PrintMimes bool + Mime string + Force bool } func (self *Drive) Export(args ExportArgs) error { - f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } - - if args.PrintMimes { - return self.printMimes(args.Out, f.MimeType) - } - - exportMime, err := getExportMime(args.Mime, f.MimeType) - if err != nil { - return err - } - - filename := getExportFilename(f.Name, exportMime) - - res, err := self.service.Files.Export(args.Id, exportMime).Download() - if err != nil { - return fmt.Errorf("Failed to download file: %s", err) - } - - // Close body on function exit - defer res.Body.Close() - - // Check if file exists - if !args.Force && fileExists(filename) { - return fmt.Errorf("File '%s' already exists, use --force to overwrite", filename) - } - - // Create new file - outFile, err := os.Create(filename) - if err != nil { - return fmt.Errorf("Unable to create new file '%s': %s", filename, err) - } - - // Close file on function exit - defer outFile.Close() - - // Save file to disk - _, err = io.Copy(outFile, res.Body) - if err != nil { - return fmt.Errorf("Failed saving file: %s", err) - } - - fmt.Fprintf(args.Out, "Exported '%s' with mime type: '%s'\n", filename, exportMime) - return nil + f, err := self.service.Files.Get(args.Id).Fields("name", "mimeType").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } + + if args.PrintMimes { + return self.printMimes(args.Out, f.MimeType) + } + + exportMime, err := getExportMime(args.Mime, f.MimeType) + if err != nil { + return err + } + + filename := getExportFilename(f.Name, exportMime) + + res, err := self.service.Files.Export(args.Id, exportMime).Download() + if err != nil { + return fmt.Errorf("Failed to download file: %s", err) + } + + // Close body on function exit + defer res.Body.Close() + + // Check if file exists + if !args.Force && fileExists(filename) { + return fmt.Errorf("File '%s' already exists, use --force to overwrite", filename) + } + + // Create new file + outFile, err := os.Create(filename) + if err != nil { + return fmt.Errorf("Unable to create new file '%s': %s", filename, err) + } + + // Close file on function exit + defer outFile.Close() + + // Save file to disk + _, err = io.Copy(outFile, res.Body) + if err != nil { + return fmt.Errorf("Failed saving file: %s", err) + } + + fmt.Fprintf(args.Out, "Exported '%s' with mime type: '%s'\n", filename, exportMime) + return nil } func (self *Drive) printMimes(out io.Writer, mimeType string) error { - about, err := self.service.About.Get().Fields("exportFormats").Do() - if err != nil { - return fmt.Errorf("Failed to get about: %s", err) - } - - mimes, ok := about.ExportFormats[mimeType] - if !ok { - return fmt.Errorf("File with type '%s' cannot be exported", mimeType) - } - - fmt.Fprintf(out, "Available mime types: 
%s\n", formatList(mimes)) - return nil + about, err := self.service.About.Get().Fields("exportFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + + mimes, ok := about.ExportFormats[mimeType] + if !ok { + return fmt.Errorf("File with type '%s' cannot be exported", mimeType) + } + + fmt.Fprintf(out, "Available mime types: %s\n", formatList(mimes)) + return nil } func getExportMime(userMime, fileMime string) (string, error) { - if userMime != "" { - return userMime, nil - } + if userMime != "" { + return userMime, nil + } - defaultMime, ok := DefaultExportMime[fileMime] - if !ok { - return "", fmt.Errorf("File with type '%s' does not have a default export mime, and can probably not be exported", fileMime) - } + defaultMime, ok := DefaultExportMime[fileMime] + if !ok { + return "", fmt.Errorf("File with type '%s' does not have a default export mime, and can probably not be exported", fileMime) + } - return defaultMime, nil + return defaultMime, nil } func getExportFilename(name, mimeType string) string { - extensions, err := mime.ExtensionsByType(mimeType) - if err != nil || len(extensions) == 0 { - return name - } + extensions, err := mime.ExtensionsByType(mimeType) + if err != nil || len(extensions) == 0 { + return name + } - return name + extensions[0] + return name + extensions[0] } diff --git a/drive/import.go b/drive/import.go index a3d8b3bd..2ee5f1e1 100644 --- a/drive/import.go +++ b/drive/import.go @@ -1,53 +1,53 @@ package drive import ( - "io" - "io/ioutil" - "fmt" - "strings" - "mime" - "path/filepath" + "fmt" + "io" + "io/ioutil" + "mime" + "path/filepath" + "strings" ) type ImportArgs struct { - Out io.Writer - Progress io.Writer - Path string - Parents []string + Out io.Writer + Progress io.Writer + Path string + Parents []string } func (self *Drive) Import(args ImportArgs) error { - fromMime := getMimeType(args.Path) - if fromMime == "" { - return fmt.Errorf("Could not determine mime type of file") - } - - about, err := self.service.About.Get().Fields("importFormats").Do() - if err != nil { - return fmt.Errorf("Failed to get about: %s", err) - } - - toMimes, ok := about.ImportFormats[fromMime] - if !ok || len(toMimes) == 0 { - return fmt.Errorf("Mime type '%s' is not supported for import", fromMime) - } - - f, _, err := self.uploadFile(UploadArgs{ - Out: ioutil.Discard, - Progress: args.Progress, - Path: args.Path, - Parents: args.Parents, - Mime: toMimes[0], - }) - if err != nil { - return err - } - - fmt.Fprintf(args.Out, "Imported %s with mime type: '%s'\n", f.Id, toMimes[0]) - return nil + fromMime := getMimeType(args.Path) + if fromMime == "" { + return fmt.Errorf("Could not determine mime type of file") + } + + about, err := self.service.About.Get().Fields("importFormats").Do() + if err != nil { + return fmt.Errorf("Failed to get about: %s", err) + } + + toMimes, ok := about.ImportFormats[fromMime] + if !ok || len(toMimes) == 0 { + return fmt.Errorf("Mime type '%s' is not supported for import", fromMime) + } + + f, _, err := self.uploadFile(UploadArgs{ + Out: ioutil.Discard, + Progress: args.Progress, + Path: args.Path, + Parents: args.Parents, + Mime: toMimes[0], + }) + if err != nil { + return err + } + + fmt.Fprintf(args.Out, "Imported %s with mime type: '%s'\n", f.Id, toMimes[0]) + return nil } func getMimeType(path string) string { - t := mime.TypeByExtension(filepath.Ext(path)) - return strings.Split(t, ";")[0] + t := mime.TypeByExtension(filepath.Ext(path)) + return strings.Split(t, ";")[0] } diff --git a/drive/info.go 
b/drive/info.go index aa190a82..c6f44715 100644 --- a/drive/info.go +++ b/drive/info.go @@ -1,68 +1,68 @@ package drive import ( - "io" - "fmt" - "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "io" ) type FileInfoArgs struct { - Out io.Writer - Id string - SizeInBytes bool + Out io.Writer + Id string + SizeInBytes bool } func (self *Drive) Info(args FileInfoArgs) error { - f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description", "webContentLink", "webViewLink").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } + f, err := self.service.Files.Get(args.Id).Fields("id", "name", "size", "createdTime", "modifiedTime", "md5Checksum", "mimeType", "parents", "shared", "description", "webContentLink", "webViewLink").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } - pathfinder := self.newPathfinder() - absPath, err := pathfinder.absPath(f) - if err != nil { - return err - } + pathfinder := self.newPathfinder() + absPath, err := pathfinder.absPath(f) + if err != nil { + return err + } - PrintFileInfo(PrintFileInfoArgs{ - Out: args.Out, - File: f, - Path: absPath, - SizeInBytes: args.SizeInBytes, - }) + PrintFileInfo(PrintFileInfoArgs{ + Out: args.Out, + File: f, + Path: absPath, + SizeInBytes: args.SizeInBytes, + }) - return nil + return nil } type PrintFileInfoArgs struct { - Out io.Writer - File *drive.File - Path string - SizeInBytes bool + Out io.Writer + File *drive.File + Path string + SizeInBytes bool } func PrintFileInfo(args PrintFileInfoArgs) { - f := args.File + f := args.File - items := []kv{ - kv{"Id", f.Id}, - kv{"Name", f.Name}, - kv{"Path", args.Path}, - kv{"Description", f.Description}, - kv{"Mime", f.MimeType}, - kv{"Size", formatSize(f.Size, args.SizeInBytes)}, - kv{"Created", formatDatetime(f.CreatedTime)}, - kv{"Modified", formatDatetime(f.ModifiedTime)}, - kv{"Md5sum", f.Md5Checksum}, - kv{"Shared", formatBool(f.Shared)}, - kv{"Parents", formatList(f.Parents)}, - kv{"ViewUrl", f.WebViewLink}, - kv{"DownloadUrl", f.WebContentLink}, - } + items := []kv{ + kv{"Id", f.Id}, + kv{"Name", f.Name}, + kv{"Path", args.Path}, + kv{"Description", f.Description}, + kv{"Mime", f.MimeType}, + kv{"Size", formatSize(f.Size, args.SizeInBytes)}, + kv{"Created", formatDatetime(f.CreatedTime)}, + kv{"Modified", formatDatetime(f.ModifiedTime)}, + kv{"Md5sum", f.Md5Checksum}, + kv{"Shared", formatBool(f.Shared)}, + kv{"Parents", formatList(f.Parents)}, + kv{"ViewUrl", f.WebViewLink}, + kv{"DownloadUrl", f.WebContentLink}, + } - for _, item := range items { - if item.value != "" { - fmt.Fprintf(args.Out, "%s: %s\n", item.key, item.value) - } - } + for _, item := range items { + if item.value != "" { + fmt.Fprintf(args.Out, "%s: %s\n", item.key, item.value) + } + } } diff --git a/drive/list.go b/drive/list.go index 73fdea51..ab8aca56 100644 --- a/drive/list.go +++ b/drive/list.go @@ -1,136 +1,136 @@ package drive import ( - "fmt" - "io" - "text/tabwriter" - "golang.org/x/net/context" - "google.golang.org/api/drive/v3" - "google.golang.org/api/googleapi" + "fmt" + "golang.org/x/net/context" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "text/tabwriter" ) type ListFilesArgs struct { - Out io.Writer - MaxFiles int64 - NameWidth int64 - Query string - SortOrder string - SkipHeader bool - SizeInBytes bool - AbsPath bool + Out io.Writer + MaxFiles int64 + NameWidth int64 + Query 
string + SortOrder string + SkipHeader bool + SizeInBytes bool + AbsPath bool } func (self *Drive) List(args ListFilesArgs) (err error) { - listArgs := listAllFilesArgs{ - query: args.Query, - fields: []googleapi.Field{"nextPageToken", "files(id,name,md5Checksum,mimeType,size,createdTime,parents)"}, - sortOrder: args.SortOrder, - maxFiles: args.MaxFiles, - } - files, err := self.listAllFiles(listArgs) - if err != nil { - return fmt.Errorf("Failed to list files: %s", err) - } - - pathfinder := self.newPathfinder() - - if args.AbsPath { - // Replace name with absolute path - for _, f := range files { - f.Name, err = pathfinder.absPath(f) - if err != nil { - return err - } - } - } - - PrintFileList(PrintFileListArgs{ - Out: args.Out, - Files: files, - NameWidth: int(args.NameWidth), - SkipHeader: args.SkipHeader, - SizeInBytes: args.SizeInBytes, - }) - - return + listArgs := listAllFilesArgs{ + query: args.Query, + fields: []googleapi.Field{"nextPageToken", "files(id,name,md5Checksum,mimeType,size,createdTime,parents)"}, + sortOrder: args.SortOrder, + maxFiles: args.MaxFiles, + } + files, err := self.listAllFiles(listArgs) + if err != nil { + return fmt.Errorf("Failed to list files: %s", err) + } + + pathfinder := self.newPathfinder() + + if args.AbsPath { + // Replace name with absolute path + for _, f := range files { + f.Name, err = pathfinder.absPath(f) + if err != nil { + return err + } + } + } + + PrintFileList(PrintFileListArgs{ + Out: args.Out, + Files: files, + NameWidth: int(args.NameWidth), + SkipHeader: args.SkipHeader, + SizeInBytes: args.SizeInBytes, + }) + + return } type listAllFilesArgs struct { - query string - fields []googleapi.Field - sortOrder string - maxFiles int64 + query string + fields []googleapi.Field + sortOrder string + maxFiles int64 } func (self *Drive) listAllFiles(args listAllFilesArgs) ([]*drive.File, error) { - var files []*drive.File + var files []*drive.File - var pageSize int64 - if args.maxFiles > 0 && args.maxFiles < 1000 { - pageSize = args.maxFiles - } else { - pageSize = 1000 - } + var pageSize int64 + if args.maxFiles > 0 && args.maxFiles < 1000 { + pageSize = args.maxFiles + } else { + pageSize = 1000 + } - controlledStop := fmt.Errorf("Controlled stop") + controlledStop := fmt.Errorf("Controlled stop") - err := self.service.Files.List().Q(args.query).Fields(args.fields...).OrderBy(args.sortOrder).PageSize(pageSize).Pages(context.TODO(), func(fl *drive.FileList) error { - files = append(files, fl.Files...) + err := self.service.Files.List().Q(args.query).Fields(args.fields...).OrderBy(args.sortOrder).PageSize(pageSize).Pages(context.TODO(), func(fl *drive.FileList) error { + files = append(files, fl.Files...) 
- // Stop when we have all the files we need - if args.maxFiles > 0 && len(files) >= int(args.maxFiles) { - return controlledStop - } + // Stop when we have all the files we need + if args.maxFiles > 0 && len(files) >= int(args.maxFiles) { + return controlledStop + } - return nil - }) + return nil + }) - if err != nil && err != controlledStop { - return nil, err - } + if err != nil && err != controlledStop { + return nil, err + } - if args.maxFiles > 0 { - n := min(len(files), int(args.maxFiles)) - return files[:n], nil - } + if args.maxFiles > 0 { + n := min(len(files), int(args.maxFiles)) + return files[:n], nil + } - return files, nil + return files, nil } type PrintFileListArgs struct { - Out io.Writer - Files []*drive.File - NameWidth int - SkipHeader bool - SizeInBytes bool + Out io.Writer + Files []*drive.File + NameWidth int + SkipHeader bool + SizeInBytes bool } func PrintFileList(args PrintFileListArgs) { - w := new(tabwriter.Writer) - w.Init(args.Out, 0, 0, 3, ' ', 0) - - if !args.SkipHeader { - fmt.Fprintln(w, "Id\tName\tType\tSize\tCreated") - } - - for _, f := range args.Files { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", - f.Id, - truncateString(f.Name, args.NameWidth), - filetype(f), - formatSize(f.Size, args.SizeInBytes), - formatDatetime(f.CreatedTime), - ) - } - - w.Flush() + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tType\tSize\tCreated") + } + + for _, f := range args.Files { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + f.Id, + truncateString(f.Name, args.NameWidth), + filetype(f), + formatSize(f.Size, args.SizeInBytes), + formatDatetime(f.CreatedTime), + ) + } + + w.Flush() } func filetype(f *drive.File) string { - if isDir(f) { - return "dir" - } else if isBinary(f) { - return "bin" - } - return "doc" + if isDir(f) { + return "dir" + } else if isBinary(f) { + return "bin" + } + return "doc" } diff --git a/drive/mkdir.go b/drive/mkdir.go index f6f0641c..8eea210b 100644 --- a/drive/mkdir.go +++ b/drive/mkdir.go @@ -1,39 +1,39 @@ package drive import ( - "google.golang.org/api/drive/v3" - "io" - "fmt" + "fmt" + "google.golang.org/api/drive/v3" + "io" ) const DirectoryMimeType = "application/vnd.google-apps.folder" type MkdirArgs struct { - Out io.Writer - Name string - Parents []string + Out io.Writer + Name string + Parents []string } func (self *Drive) Mkdir(args MkdirArgs) error { - f, err := self.mkdir(args) - if err != nil { - return err - } - fmt.Fprintf(args.Out, "Directory %s created\n", f.Id) - return nil + f, err := self.mkdir(args) + if err != nil { + return err + } + fmt.Fprintf(args.Out, "Directory %s created\n", f.Id) + return nil } func (self *Drive) mkdir(args MkdirArgs) (*drive.File, error) { - dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType} + dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType} - // Set parent folders - dstFile.Parents = args.Parents + // Set parent folders + dstFile.Parents = args.Parents - // Create directory - f, err := self.service.Files.Create(dstFile).Do() - if err != nil { - return nil, fmt.Errorf("Failed to create directory: %s", err) - } + // Create directory + f, err := self.service.Files.Create(dstFile).Do() + if err != nil { + return nil, fmt.Errorf("Failed to create directory: %s", err) + } - return f, nil + return f, nil } diff --git a/drive/path.go b/drive/path.go index f5d1ad54..8043a01d 100644 --- a/drive/path.go +++ b/drive/path.go @@ -1,65 +1,65 @@ package drive import ( - "fmt" - "path/filepath" - 
"google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "path/filepath" ) func (self *Drive) newPathfinder() *remotePathfinder { - return &remotePathfinder{ - service: self.service.Files, - files: make(map[string]*drive.File), - } + return &remotePathfinder{ + service: self.service.Files, + files: make(map[string]*drive.File), + } } type remotePathfinder struct { - service *drive.FilesService - files map[string]*drive.File + service *drive.FilesService + files map[string]*drive.File } func (self *remotePathfinder) absPath(f *drive.File) (string, error) { - name := f.Name + name := f.Name - if len(f.Parents) == 0 { - return name, nil - } + if len(f.Parents) == 0 { + return name, nil + } - var path []string + var path []string - for { - parent, err := self.getParent(f.Parents[0]) - if err != nil { - return "", err - } + for { + parent, err := self.getParent(f.Parents[0]) + if err != nil { + return "", err + } - // Stop when we find the root dir - if len(parent.Parents) == 0 { - break - } + // Stop when we find the root dir + if len(parent.Parents) == 0 { + break + } - path = append([]string{parent.Name}, path...) - f = parent - } + path = append([]string{parent.Name}, path...) + f = parent + } - path = append(path, name) - return filepath.Join(path...), nil + path = append(path, name) + return filepath.Join(path...), nil } func (self *remotePathfinder) getParent(id string) (*drive.File, error) { - // Check cache - if f, ok := self.files[id]; ok { - return f, nil - } + // Check cache + if f, ok := self.files[id]; ok { + return f, nil + } - // Fetch file from drive - f, err := self.service.Get(id).Fields("id", "name", "parents").Do() - if err != nil { - return nil, fmt.Errorf("Failed to get file: %s", err) - } + // Fetch file from drive + f, err := self.service.Get(id).Fields("id", "name", "parents").Do() + if err != nil { + return nil, fmt.Errorf("Failed to get file: %s", err) + } - // Save in cache - self.files[f.Id] = f + // Save in cache + self.files[f.Id] = f - return f, nil + return f, nil } diff --git a/drive/progress.go b/drive/progress.go index 989191ef..bb5740c0 100644 --- a/drive/progress.go +++ b/drive/progress.go @@ -1,101 +1,101 @@ package drive import ( - "io" - "io/ioutil" - "fmt" - "time" + "fmt" + "io" + "io/ioutil" + "time" ) const MaxDrawInterval = time.Second * 1 const MaxRateInterval = time.Second * 3 func getProgressReader(r io.Reader, w io.Writer, size int64) io.Reader { - // Don't wrap reader if output is discarded or size is too small - if w == ioutil.Discard || (size > 0 && size < 1024 * 1024) { - return r - } - - return &Progress{ - Reader: r, - Writer: w, - Size: size, - } + // Don't wrap reader if output is discarded or size is too small + if w == ioutil.Discard || (size > 0 && size < 1024*1024) { + return r + } + + return &Progress{ + Reader: r, + Writer: w, + Size: size, + } } type Progress struct { - Writer io.Writer - Reader io.Reader - Size int64 - progress int64 - rate int64 - rateProgress int64 - rateUpdated time.Time - updated time.Time - done bool + Writer io.Writer + Reader io.Reader + Size int64 + progress int64 + rate int64 + rateProgress int64 + rateUpdated time.Time + updated time.Time + done bool } func (self *Progress) Read(p []byte) (int, error) { - // Read - n, err := self.Reader.Read(p) - - now := time.Now() - isLast := err != nil - - // Increment progress - newProgress := self.progress + int64(n) - self.progress = newProgress - - // Initialize rate state - if self.rateUpdated.IsZero() { - self.rateUpdated = now - 
self.rateProgress = newProgress - } - - // Update rate every x seconds - if self.rateUpdated.Add(MaxRateInterval).Before(now) { - self.rate = calcRate(newProgress - self.rateProgress, self.rateUpdated, now) - self.rateUpdated = now - self.rateProgress = newProgress - } - - // Draw progress every x seconds - if self.updated.Add(MaxDrawInterval).Before(now) || isLast { - self.draw(isLast) - self.updated = now - } - - // Mark as done if error occurs - self.done = isLast - - return n, err + // Read + n, err := self.Reader.Read(p) + + now := time.Now() + isLast := err != nil + + // Increment progress + newProgress := self.progress + int64(n) + self.progress = newProgress + + // Initialize rate state + if self.rateUpdated.IsZero() { + self.rateUpdated = now + self.rateProgress = newProgress + } + + // Update rate every x seconds + if self.rateUpdated.Add(MaxRateInterval).Before(now) { + self.rate = calcRate(newProgress-self.rateProgress, self.rateUpdated, now) + self.rateUpdated = now + self.rateProgress = newProgress + } + + // Draw progress every x seconds + if self.updated.Add(MaxDrawInterval).Before(now) || isLast { + self.draw(isLast) + self.updated = now + } + + // Mark as done if error occurs + self.done = isLast + + return n, err } func (self *Progress) draw(isLast bool) { - if self.done { - return - } + if self.done { + return + } - self.clear() + self.clear() - // Print progress - fmt.Fprintf(self.Writer, "%s", formatSize(self.progress, false)) + // Print progress + fmt.Fprintf(self.Writer, "%s", formatSize(self.progress, false)) - // Print total size - if self.Size > 0 { - fmt.Fprintf(self.Writer, "/%s", formatSize(self.Size, false)) - } + // Print total size + if self.Size > 0 { + fmt.Fprintf(self.Writer, "/%s", formatSize(self.Size, false)) + } - // Print rate - if self.rate > 0 { - fmt.Fprintf(self.Writer, ", Rate: %s/s", formatSize(self.rate, false)) - } + // Print rate + if self.rate > 0 { + fmt.Fprintf(self.Writer, ", Rate: %s/s", formatSize(self.rate, false)) + } - if isLast { - self.clear() - } + if isLast { + self.clear() + } } func (self *Progress) clear() { - fmt.Fprintf(self.Writer, "\r%50s\r", "") + fmt.Fprintf(self.Writer, "\r%50s\r", "") } diff --git a/drive/revision_delete.go b/drive/revision_delete.go index 88c81c66..de530410 100644 --- a/drive/revision_delete.go +++ b/drive/revision_delete.go @@ -1,31 +1,31 @@ package drive import ( - "io" - "fmt" + "fmt" + "io" ) type DeleteRevisionArgs struct { - Out io.Writer - FileId string - RevisionId string + Out io.Writer + FileId string + RevisionId string } func (self *Drive) DeleteRevision(args DeleteRevisionArgs) (err error) { - rev, err := self.service.Revisions.Get(args.FileId, args.RevisionId).Fields("originalFilename").Do() - if err != nil { - return fmt.Errorf("Failed to get revision: %s", err) - } + rev, err := self.service.Revisions.Get(args.FileId, args.RevisionId).Fields("originalFilename").Do() + if err != nil { + return fmt.Errorf("Failed to get revision: %s", err) + } - if rev.OriginalFilename == "" { - return fmt.Errorf("Deleting revisions for this file type is not supported") - } + if rev.OriginalFilename == "" { + return fmt.Errorf("Deleting revisions for this file type is not supported") + } - err = self.service.Revisions.Delete(args.FileId, args.RevisionId).Do() - if err != nil { - return fmt.Errorf("Failed to delete revision", err) - } + err = self.service.Revisions.Delete(args.FileId, args.RevisionId).Do() + if err != nil { + return fmt.Errorf("Failed to delete revision", err) + } - 
fmt.Fprintf(args.Out, "Deleted revision '%s'\n", args.RevisionId) - return + fmt.Fprintf(args.Out, "Deleted revision '%s'\n", args.RevisionId) + return } diff --git a/drive/revision_download.go b/drive/revision_download.go index 039cd19e..04055fae 100644 --- a/drive/revision_download.go +++ b/drive/revision_download.go @@ -1,70 +1,70 @@ package drive import ( - "fmt" - "path/filepath" - "io" - "io/ioutil" + "fmt" + "io" + "io/ioutil" + "path/filepath" ) type DownloadRevisionArgs struct { - Out io.Writer - Progress io.Writer - FileId string - RevisionId string - Path string - Force bool - Stdout bool + Out io.Writer + Progress io.Writer + FileId string + RevisionId string + Path string + Force bool + Stdout bool } func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { - getRev := self.service.Revisions.Get(args.FileId, args.RevisionId) + getRev := self.service.Revisions.Get(args.FileId, args.RevisionId) - rev, err := getRev.Fields("originalFilename").Do() - if err != nil { - return fmt.Errorf("Failed to get file: %s", err) - } + rev, err := getRev.Fields("originalFilename").Do() + if err != nil { + return fmt.Errorf("Failed to get file: %s", err) + } - if rev.OriginalFilename == "" { - return fmt.Errorf("Download is not supported for this file type") - } + if rev.OriginalFilename == "" { + return fmt.Errorf("Download is not supported for this file type") + } - // Get timeout reader wrapper and context - timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + // Get timeout reader wrapper and context + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() - res, err := getRev.Context(ctx).Download() - if err != nil { - return fmt.Errorf("Failed to download file: %s", err) - } + res, err := getRev.Context(ctx).Download() + if err != nil { + return fmt.Errorf("Failed to download file: %s", err) + } - // Close body on function exit - defer res.Body.Close() + // Close body on function exit + defer res.Body.Close() - // Discard other output if file is written to stdout - out := args.Out - if args.Stdout { - out = ioutil.Discard - } + // Discard other output if file is written to stdout + out := args.Out + if args.Stdout { + out = ioutil.Discard + } - // Path to file - fpath := filepath.Join(args.Path, rev.OriginalFilename) + // Path to file + fpath := filepath.Join(args.Path, rev.OriginalFilename) - fmt.Fprintf(out, "Downloading %s -> %s\n", rev.OriginalFilename, fpath) + fmt.Fprintf(out, "Downloading %s -> %s\n", rev.OriginalFilename, fpath) - bytes, rate, err := self.saveFile(saveFileArgs{ - out: args.Out, - body: timeoutReaderWrapper(res.Body), - contentLength: res.ContentLength, - fpath: fpath, - force: args.Force, - stdout: args.Stdout, - progress: args.Progress, - }) + bytes, rate, err := self.saveFile(saveFileArgs{ + out: args.Out, + body: timeoutReaderWrapper(res.Body), + contentLength: res.ContentLength, + fpath: fpath, + force: args.Force, + stdout: args.Stdout, + progress: args.Progress, + }) - if err != nil { - return err - } + if err != nil { + return err + } - fmt.Fprintf(out, "Download complete, rate: %s/s, total size: %s\n", formatSize(rate, false), formatSize(bytes, false)) - return nil + fmt.Fprintf(out, "Download complete, rate: %s/s, total size: %s\n", formatSize(rate, false), formatSize(bytes, false)) + return nil } diff --git a/drive/revision_list.go b/drive/revision_list.go index 941fbca9..eec4dab7 100644 --- a/drive/revision_list.go +++ b/drive/revision_list.go @@ -1,62 +1,62 @@ package drive import ( - "fmt" - "io" - "text/tabwriter" 
- "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "io" + "text/tabwriter" ) type ListRevisionsArgs struct { - Out io.Writer - Id string - NameWidth int64 - SkipHeader bool - SizeInBytes bool + Out io.Writer + Id string + NameWidth int64 + SkipHeader bool + SizeInBytes bool } func (self *Drive) ListRevisions(args ListRevisionsArgs) (err error) { - revList, err := self.service.Revisions.List(args.Id).Fields("revisions(id,keepForever,size,modifiedTime,originalFilename)").Do() - if err != nil { - return fmt.Errorf("Failed listing revisions: %s", err) - } - - PrintRevisionList(PrintRevisionListArgs{ - Out: args.Out, - Revisions: revList.Revisions, - NameWidth: int(args.NameWidth), - SkipHeader: args.SkipHeader, - SizeInBytes: args.SizeInBytes, - }) - - return + revList, err := self.service.Revisions.List(args.Id).Fields("revisions(id,keepForever,size,modifiedTime,originalFilename)").Do() + if err != nil { + return fmt.Errorf("Failed listing revisions: %s", err) + } + + PrintRevisionList(PrintRevisionListArgs{ + Out: args.Out, + Revisions: revList.Revisions, + NameWidth: int(args.NameWidth), + SkipHeader: args.SkipHeader, + SizeInBytes: args.SizeInBytes, + }) + + return } type PrintRevisionListArgs struct { - Out io.Writer - Revisions []*drive.Revision - NameWidth int - SkipHeader bool - SizeInBytes bool + Out io.Writer + Revisions []*drive.Revision + NameWidth int + SkipHeader bool + SizeInBytes bool } func PrintRevisionList(args PrintRevisionListArgs) { - w := new(tabwriter.Writer) - w.Init(args.Out, 0, 0, 3, ' ', 0) - - if !args.SkipHeader { - fmt.Fprintln(w, "Id\tName\tSize\tModified\tKeepForever") - } - - for _, rev := range args.Revisions { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", - rev.Id, - truncateString(rev.OriginalFilename, args.NameWidth), - formatSize(rev.Size, args.SizeInBytes), - formatDatetime(rev.ModifiedTime), - formatBool(rev.KeepForever), - ) - } - - w.Flush() + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tSize\tModified\tKeepForever") + } + + for _, rev := range args.Revisions { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + rev.Id, + truncateString(rev.OriginalFilename, args.NameWidth), + formatSize(rev.Size, args.SizeInBytes), + formatDatetime(rev.ModifiedTime), + formatBool(rev.KeepForever), + ) + } + + w.Flush() } diff --git a/drive/share.go b/drive/share.go index 291512a8..69b9c7d8 100644 --- a/drive/share.go +++ b/drive/share.go @@ -1,109 +1,109 @@ package drive import ( - "io" - "fmt" - "text/tabwriter" - "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "io" + "text/tabwriter" ) type ShareArgs struct { - Out io.Writer - FileId string - Role string - Type string - Email string - Discoverable bool + Out io.Writer + FileId string + Role string + Type string + Email string + Discoverable bool } func (self *Drive) Share(args ShareArgs) error { - permission := &drive.Permission{ - AllowFileDiscovery: args.Discoverable, - Role: args.Role, - Type: args.Type, - EmailAddress: args.Email, - } - - _, err := self.service.Permissions.Create(args.FileId, permission).Do() - if err != nil { - return fmt.Errorf("Failed to share file: %s", err) - } - - fmt.Fprintf(args.Out, "Granted %s permission to %s\n", args.Role, args.Type) - return nil + permission := &drive.Permission{ + AllowFileDiscovery: args.Discoverable, + Role: args.Role, + Type: args.Type, + EmailAddress: args.Email, + } + + _, err := self.service.Permissions.Create(args.FileId, permission).Do() + 
if err != nil { + return fmt.Errorf("Failed to share file: %s", err) + } + + fmt.Fprintf(args.Out, "Granted %s permission to %s\n", args.Role, args.Type) + return nil } type RevokePermissionArgs struct { - Out io.Writer - FileId string - PermissionId string + Out io.Writer + FileId string + PermissionId string } func (self *Drive) RevokePermission(args RevokePermissionArgs) error { - err := self.service.Permissions.Delete(args.FileId, args.PermissionId).Do() - if err != nil { - fmt.Errorf("Failed to revoke permission: %s", err) - return err - } - - fmt.Fprintf(args.Out, "Permission revoked\n") - return nil + err := self.service.Permissions.Delete(args.FileId, args.PermissionId).Do() + if err != nil { + fmt.Errorf("Failed to revoke permission: %s", err) + return err + } + + fmt.Fprintf(args.Out, "Permission revoked\n") + return nil } type ListPermissionsArgs struct { - Out io.Writer - FileId string + Out io.Writer + FileId string } func (self *Drive) ListPermissions(args ListPermissionsArgs) error { - permList, err := self.service.Permissions.List(args.FileId).Fields("permissions(id,role,type,domain,emailAddress,allowFileDiscovery)").Do() - if err != nil { - fmt.Errorf("Failed to list permissions: %s", err) - return err - } - - printPermissions(printPermissionsArgs{ - out: args.Out, - permissions: permList.Permissions, - }) - return nil + permList, err := self.service.Permissions.List(args.FileId).Fields("permissions(id,role,type,domain,emailAddress,allowFileDiscovery)").Do() + if err != nil { + fmt.Errorf("Failed to list permissions: %s", err) + return err + } + + printPermissions(printPermissionsArgs{ + out: args.Out, + permissions: permList.Permissions, + }) + return nil } func (self *Drive) shareAnyoneReader(fileId string) error { - permission := &drive.Permission{ - Role: "reader", - Type: "anyone", - } + permission := &drive.Permission{ + Role: "reader", + Type: "anyone", + } - _, err := self.service.Permissions.Create(fileId, permission).Do() - if err != nil { - return fmt.Errorf("Failed to share file: %s", err) - } + _, err := self.service.Permissions.Create(fileId, permission).Do() + if err != nil { + return fmt.Errorf("Failed to share file: %s", err) + } - return nil + return nil } type printPermissionsArgs struct { - out io.Writer - permissions []*drive.Permission + out io.Writer + permissions []*drive.Permission } func printPermissions(args printPermissionsArgs) { - w := new(tabwriter.Writer) - w.Init(args.out, 0, 0, 3, ' ', 0) - - fmt.Fprintln(w, "Id\tType\tRole\tEmail\tDomain\tDiscoverable") - - for _, p := range args.permissions { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", - p.Id, - p.Type, - p.Role, - p.EmailAddress, - p.Domain, - formatBool(p.AllowFileDiscovery), - ) - } - - w.Flush() + w := new(tabwriter.Writer) + w.Init(args.out, 0, 0, 3, ' ', 0) + + fmt.Fprintln(w, "Id\tType\tRole\tEmail\tDomain\tDiscoverable") + + for _, p := range args.permissions { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", + p.Id, + p.Type, + p.Role, + p.EmailAddress, + p.Domain, + formatBool(p.AllowFileDiscovery), + ) + } + + w.Flush() } diff --git a/drive/sync.go b/drive/sync.go index 2124f8fc..35ab16eb 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -1,17 +1,17 @@ package drive import ( - "time" - "fmt" - "os" - "io" - "strings" - "path/filepath" - "text/tabwriter" - "github.com/soniakeys/graph" - "github.com/sabhiram/go-git-ignore" - "google.golang.org/api/drive/v3" - "google.golang.org/api/googleapi" + "fmt" + "github.com/sabhiram/go-git-ignore" + "github.com/soniakeys/graph" + 
"google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "os" + "path/filepath" + "strings" + "text/tabwriter" + "time" ) const DefaultIgnoreFile = ".gdriveignore" @@ -19,595 +19,607 @@ const DefaultIgnoreFile = ".gdriveignore" type ModTime int const ( - LocalLastModified ModTime = iota - RemoteLastModified - EqualModifiedTime + LocalLastModified ModTime = iota + RemoteLastModified + EqualModifiedTime ) type LargestSize int const ( - LocalLargestSize LargestSize = iota - RemoteLargestSize - EqualSize + LocalLargestSize LargestSize = iota + RemoteLargestSize + EqualSize ) type ConflictResolution int const ( - NoResolution ConflictResolution = iota - KeepLocal - KeepRemote - KeepLargest + NoResolution ConflictResolution = iota + KeepLocal + KeepRemote + KeepLargest ) func (self *Drive) prepareSyncFiles(localPath string, root *drive.File, cmp FileComparer) (*syncFiles, error) { - localCh := make(chan struct{files []*LocalFile; err error}) - remoteCh := make(chan struct{files []*RemoteFile; err error}) - - go func() { - files, err := prepareLocalFiles(localPath) - localCh <- struct{files []*LocalFile; err error}{files, err} - }() - - go func() { - files, err := self.prepareRemoteFiles(root, "") - remoteCh <- struct{files []*RemoteFile; err error}{files, err} - }() - - local := <-localCh - if local.err != nil { - return nil, local.err - } - - remote := <-remoteCh - if remote.err != nil { - return nil, remote.err - } - - return &syncFiles{ - root: &RemoteFile{file: root}, - local: local.files, - remote: remote.files, - compare: cmp, - }, nil + localCh := make(chan struct { + files []*LocalFile + err error + }) + remoteCh := make(chan struct { + files []*RemoteFile + err error + }) + + go func() { + files, err := prepareLocalFiles(localPath) + localCh <- struct { + files []*LocalFile + err error + }{files, err} + }() + + go func() { + files, err := self.prepareRemoteFiles(root, "") + remoteCh <- struct { + files []*RemoteFile + err error + }{files, err} + }() + + local := <-localCh + if local.err != nil { + return nil, local.err + } + + remote := <-remoteCh + if remote.err != nil { + return nil, remote.err + } + + return &syncFiles{ + root: &RemoteFile{file: root}, + local: local.files, + remote: remote.files, + compare: cmp, + }, nil } func (self *Drive) isSyncFile(id string) (bool, error) { - f, err := self.service.Files.Get(id).Fields("appProperties").Do() - if err != nil { - return false, fmt.Errorf("Failed to get file: %s", err) - } + f, err := self.service.Files.Get(id).Fields("appProperties").Do() + if err != nil { + return false, fmt.Errorf("Failed to get file: %s", err) + } - _, ok := f.AppProperties["sync"] - return ok, nil + _, ok := f.AppProperties["sync"] + return ok, nil } func prepareLocalFiles(root string) ([]*LocalFile, error) { - var files []*LocalFile - - // Get absolute root path - absRootPath, err := filepath.Abs(root) - if err != nil { - return nil, err - } - - // Prepare ignorer - shouldIgnore, err := prepareIgnorer(filepath.Join(absRootPath, DefaultIgnoreFile)) - if err != nil { - return nil, err - } - - err = filepath.Walk(absRootPath, func(absPath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Skip root directory - if absPath == absRootPath { - return nil - } - - // Skip files that are not a directory or regular file - if !info.IsDir() && !info.Mode().IsRegular() { - return nil - } - - // Get relative path from root - relPath, err := filepath.Rel(absRootPath, absPath) - if err != nil { - return err - } - - // 
Skip file if it is ignored by ignore file - if shouldIgnore(relPath) { - return nil - } - - files = append(files, &LocalFile{ - absPath: absPath, - relPath: relPath, - info: info, - }) - - return nil - }) - - if err != nil { - return nil, fmt.Errorf("Failed to prepare local files: %s", err) - } - - return files, err + var files []*LocalFile + + // Get absolute root path + absRootPath, err := filepath.Abs(root) + if err != nil { + return nil, err + } + + // Prepare ignorer + shouldIgnore, err := prepareIgnorer(filepath.Join(absRootPath, DefaultIgnoreFile)) + if err != nil { + return nil, err + } + + err = filepath.Walk(absRootPath, func(absPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip root directory + if absPath == absRootPath { + return nil + } + + // Skip files that are not a directory or regular file + if !info.IsDir() && !info.Mode().IsRegular() { + return nil + } + + // Get relative path from root + relPath, err := filepath.Rel(absRootPath, absPath) + if err != nil { + return err + } + + // Skip file if it is ignored by ignore file + if shouldIgnore(relPath) { + return nil + } + + files = append(files, &LocalFile{ + absPath: absPath, + relPath: relPath, + info: info, + }) + + return nil + }) + + if err != nil { + return nil, fmt.Errorf("Failed to prepare local files: %s", err) + } + + return files, err } func (self *Drive) prepareRemoteFiles(rootDir *drive.File, sortOrder string) ([]*RemoteFile, error) { - // Find all files which has rootDir as root - listArgs := listAllFilesArgs{ - query: fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id), - fields: []googleapi.Field{"nextPageToken", "files(id,name,parents,md5Checksum,mimeType,size,modifiedTime)"}, - sortOrder: sortOrder, - } - files, err := self.listAllFiles(listArgs) - if err != nil { - return nil, fmt.Errorf("Failed listing files: %s", err) - } - - if err := checkFiles(files); err != nil { - return nil, err - } - - relPaths, err := prepareRemoteRelPaths(rootDir, files) - if err != nil { - return nil, err - } - - var remoteFiles []*RemoteFile - for _, f := range files { - relPath, ok := relPaths[f.Id] - if !ok { - return nil, fmt.Errorf("File %s does not have a valid parent", f.Id) - } - remoteFiles = append(remoteFiles, &RemoteFile{ - relPath: relPath, - file: f, - }) - } - - return remoteFiles, nil + // Find all files which has rootDir as root + listArgs := listAllFilesArgs{ + query: fmt.Sprintf("appProperties has {key='syncRootId' and value='%s'}", rootDir.Id), + fields: []googleapi.Field{"nextPageToken", "files(id,name,parents,md5Checksum,mimeType,size,modifiedTime)"}, + sortOrder: sortOrder, + } + files, err := self.listAllFiles(listArgs) + if err != nil { + return nil, fmt.Errorf("Failed listing files: %s", err) + } + + if err := checkFiles(files); err != nil { + return nil, err + } + + relPaths, err := prepareRemoteRelPaths(rootDir, files) + if err != nil { + return nil, err + } + + var remoteFiles []*RemoteFile + for _, f := range files { + relPath, ok := relPaths[f.Id] + if !ok { + return nil, fmt.Errorf("File %s does not have a valid parent", f.Id) + } + remoteFiles = append(remoteFiles, &RemoteFile{ + relPath: relPath, + file: f, + }) + } + + return remoteFiles, nil } func prepareRemoteRelPaths(root *drive.File, files []*drive.File) (map[string]string, error) { - // The tree only holds integer values so we use - // maps to lookup file by index and index by file id - indexLookup := map[string]graph.NI{} - fileLookup := map[graph.NI]*drive.File{} 
- - // All files includes root dir - allFiles := append([]*drive.File{root}, files...) - - // Prepare lookup maps - for i, f := range allFiles { - indexLookup[f.Id] = graph.NI(i) - fileLookup[graph.NI(i)] = f - } - - // This will hold 'parent index' -> 'file index' relationships - pathEnds := make([]graph.PathEnd, len(allFiles)) - - // Prepare parent -> file relationships - for i, f := range allFiles { - if f == root { - pathEnds[i] = graph.PathEnd{From: -1} - continue - } - - // Lookup index of parent - parentIdx, found := indexLookup[f.Parents[0]] - if !found { - return nil, fmt.Errorf("Could not find parent of %s (%s)", f.Id, f.Name) - } - pathEnds[i] = graph.PathEnd{From: parentIdx} - } - - // Create parent pointer tree and calculate path lengths - tree := &graph.FromList{Paths: pathEnds} - tree.RecalcLeaves() - tree.RecalcLen() - - // This will hold a map of file id => relative path - paths := map[string]string{} - - // Find relative path from root for all files - for _, f := range allFiles { - if f == root { - continue - } - - // Find nodes between root and file - nodes := tree.PathTo(indexLookup[f.Id], nil) - - // This will hold the name of all paths between root and - // file (exluding root and including file itself) - pathNames := []string{} - - // Lookup file for each node and grab name - for _, n := range nodes { - file := fileLookup[n] - if file == root { - continue - } - pathNames = append(pathNames, file.Name) - } - - // Join path names to form relative path and add to map - paths[f.Id] = filepath.Join(pathNames...) - } - - return paths, nil + // The tree only holds integer values so we use + // maps to lookup file by index and index by file id + indexLookup := map[string]graph.NI{} + fileLookup := map[graph.NI]*drive.File{} + + // All files includes root dir + allFiles := append([]*drive.File{root}, files...) + + // Prepare lookup maps + for i, f := range allFiles { + indexLookup[f.Id] = graph.NI(i) + fileLookup[graph.NI(i)] = f + } + + // This will hold 'parent index' -> 'file index' relationships + pathEnds := make([]graph.PathEnd, len(allFiles)) + + // Prepare parent -> file relationships + for i, f := range allFiles { + if f == root { + pathEnds[i] = graph.PathEnd{From: -1} + continue + } + + // Lookup index of parent + parentIdx, found := indexLookup[f.Parents[0]] + if !found { + return nil, fmt.Errorf("Could not find parent of %s (%s)", f.Id, f.Name) + } + pathEnds[i] = graph.PathEnd{From: parentIdx} + } + + // Create parent pointer tree and calculate path lengths + tree := &graph.FromList{Paths: pathEnds} + tree.RecalcLeaves() + tree.RecalcLen() + + // This will hold a map of file id => relative path + paths := map[string]string{} + + // Find relative path from root for all files + for _, f := range allFiles { + if f == root { + continue + } + + // Find nodes between root and file + nodes := tree.PathTo(indexLookup[f.Id], nil) + + // This will hold the name of all paths between root and + // file (exluding root and including file itself) + pathNames := []string{} + + // Lookup file for each node and grab name + for _, n := range nodes { + file := fileLookup[n] + if file == root { + continue + } + pathNames = append(pathNames, file.Name) + } + + // Join path names to form relative path and add to map + paths[f.Id] = filepath.Join(pathNames...) 
+ } + + return paths, nil } func checkFiles(files []*drive.File) error { - uniq := map[string]string{} + uniq := map[string]string{} - for _, f := range files { - // Ensure all files have exactly one parent - if len(f.Parents) != 1 { - return fmt.Errorf("File %s does not have exacly one parent", f.Id) - } + for _, f := range files { + // Ensure all files have exactly one parent + if len(f.Parents) != 1 { + return fmt.Errorf("File %s does not have exacly one parent", f.Id) + } - // Ensure that there are no duplicate files - uniqKey := f.Name + f.Parents[0] - if dupeId, isDupe := uniq[uniqKey]; isDupe { - return fmt.Errorf("Found name collision between %s and %s", f.Id, dupeId) - } - uniq[uniqKey] = f.Id - } + // Ensure that there are no duplicate files + uniqKey := f.Name + f.Parents[0] + if dupeId, isDupe := uniq[uniqKey]; isDupe { + return fmt.Errorf("Found name collision between %s and %s", f.Id, dupeId) + } + uniq[uniqKey] = f.Id + } - return nil + return nil } type LocalFile struct { - absPath string - relPath string - info os.FileInfo + absPath string + relPath string + info os.FileInfo } type RemoteFile struct { - relPath string - file *drive.File + relPath string + file *drive.File } type changedFile struct { - local *LocalFile - remote *RemoteFile + local *LocalFile + remote *RemoteFile } type syncFiles struct { - root *RemoteFile - local []*LocalFile - remote []*RemoteFile - compare FileComparer + root *RemoteFile + local []*LocalFile + remote []*RemoteFile + compare FileComparer } type FileComparer interface { - Changed(*LocalFile, *RemoteFile) bool + Changed(*LocalFile, *RemoteFile) bool } func (self LocalFile) AbsPath() string { - return self.absPath + return self.absPath } func (self LocalFile) Size() int64 { - return self.info.Size() + return self.info.Size() } func (self LocalFile) Modified() time.Time { - return self.info.ModTime() + return self.info.ModTime() } func (self RemoteFile) Md5() string { - return self.file.Md5Checksum + return self.file.Md5Checksum } func (self RemoteFile) Size() int64 { - return self.file.Size + return self.file.Size } func (self RemoteFile) Modified() time.Time { - t, _ := time.Parse(time.RFC3339, self.file.ModifiedTime) - return t + t, _ := time.Parse(time.RFC3339, self.file.ModifiedTime) + return t } func (self *changedFile) compareModTime() ModTime { - localTime := self.local.Modified() - remoteTime := self.remote.Modified() + localTime := self.local.Modified() + remoteTime := self.remote.Modified() - if localTime.After(remoteTime) { - return LocalLastModified - } + if localTime.After(remoteTime) { + return LocalLastModified + } - if remoteTime.After(localTime) { - return RemoteLastModified - } + if remoteTime.After(localTime) { + return RemoteLastModified + } - return EqualModifiedTime + return EqualModifiedTime } func (self *changedFile) compareSize() LargestSize { - localSize := self.local.Size() - remoteSize := self.remote.Size() + localSize := self.local.Size() + remoteSize := self.remote.Size() - if localSize > remoteSize { - return LocalLargestSize - } + if localSize > remoteSize { + return LocalLargestSize + } - if remoteSize > localSize { - return RemoteLargestSize - } + if remoteSize > localSize { + return RemoteLargestSize + } - return EqualSize + return EqualSize } func (self *syncFiles) filterMissingRemoteDirs() []*LocalFile { - var files []*LocalFile + var files []*LocalFile - for _, lf := range self.local { - if lf.info.IsDir() && !self.existsRemote(lf) { - files = append(files, lf) - } - } + for _, lf := range self.local { + 
if lf.info.IsDir() && !self.existsRemote(lf) { + files = append(files, lf) + } + } - return files + return files } func (self *syncFiles) filterMissingLocalDirs() []*RemoteFile { - var files []*RemoteFile + var files []*RemoteFile - for _, rf := range self.remote { - if isDir(rf.file) && !self.existsLocal(rf) { - files = append(files, rf) - } - } + for _, rf := range self.remote { + if isDir(rf.file) && !self.existsLocal(rf) { + files = append(files, rf) + } + } - return files + return files } func (self *syncFiles) filterMissingRemoteFiles() []*LocalFile { - var files []*LocalFile + var files []*LocalFile - for _, lf := range self.local { - if !lf.info.IsDir() && !self.existsRemote(lf) { - files = append(files, lf) - } - } + for _, lf := range self.local { + if !lf.info.IsDir() && !self.existsRemote(lf) { + files = append(files, lf) + } + } - return files + return files } func (self *syncFiles) filterMissingLocalFiles() []*RemoteFile { - var files []*RemoteFile + var files []*RemoteFile - for _, rf := range self.remote { - if !isDir(rf.file) && !self.existsLocal(rf) { - files = append(files, rf) - } - } + for _, rf := range self.remote { + if !isDir(rf.file) && !self.existsLocal(rf) { + files = append(files, rf) + } + } - return files + return files } func (self *syncFiles) filterChangedLocalFiles() []*changedFile { - var files []*changedFile + var files []*changedFile - for _, lf := range self.local { - // Skip directories - if lf.info.IsDir() { - continue - } + for _, lf := range self.local { + // Skip directories + if lf.info.IsDir() { + continue + } - // Skip files that don't exist on drive - rf, found := self.findRemoteByPath(lf.relPath) - if !found { - continue - } + // Skip files that don't exist on drive + rf, found := self.findRemoteByPath(lf.relPath) + if !found { + continue + } - // Check if file has changed - if self.compare.Changed(lf, rf) { - files = append(files, &changedFile{ - local: lf, - remote: rf, - }) - } - } + // Check if file has changed + if self.compare.Changed(lf, rf) { + files = append(files, &changedFile{ + local: lf, + remote: rf, + }) + } + } - return files + return files } func (self *syncFiles) filterChangedRemoteFiles() []*changedFile { - var files []*changedFile + var files []*changedFile - for _, rf := range self.remote { - // Skip directories - if isDir(rf.file) { - continue - } + for _, rf := range self.remote { + // Skip directories + if isDir(rf.file) { + continue + } - // Skip local files that don't exist - lf, found := self.findLocalByPath(rf.relPath) - if !found { - continue - } + // Skip local files that don't exist + lf, found := self.findLocalByPath(rf.relPath) + if !found { + continue + } - // Check if file has changed - if self.compare.Changed(lf, rf) { - files = append(files, &changedFile{ - local: lf, - remote: rf, - }) - } - } + // Check if file has changed + if self.compare.Changed(lf, rf) { + files = append(files, &changedFile{ + local: lf, + remote: rf, + }) + } + } - return files + return files } func (self *syncFiles) filterExtraneousRemoteFiles() []*RemoteFile { - var files []*RemoteFile + var files []*RemoteFile - for _, rf := range self.remote { - if !self.existsLocal(rf) { - files = append(files, rf) - } - } + for _, rf := range self.remote { + if !self.existsLocal(rf) { + files = append(files, rf) + } + } - return files + return files } func (self *syncFiles) filterExtraneousLocalFiles() []*LocalFile { - var files []*LocalFile + var files []*LocalFile - for _, lf := range self.local { - if !self.existsRemote(lf) { - files = 
append(files, lf) - } - } + for _, lf := range self.local { + if !self.existsRemote(lf) { + files = append(files, lf) + } + } - return files + return files } func (self *syncFiles) existsRemote(lf *LocalFile) bool { - _, found := self.findRemoteByPath(lf.relPath) - return found + _, found := self.findRemoteByPath(lf.relPath) + return found } func (self *syncFiles) existsLocal(rf *RemoteFile) bool { - _, found := self.findLocalByPath(rf.relPath) - return found + _, found := self.findLocalByPath(rf.relPath) + return found } func (self *syncFiles) findRemoteByPath(relPath string) (*RemoteFile, bool) { - if relPath == "." { - return self.root, true - } + if relPath == "." { + return self.root, true + } - for _, rf := range self.remote { - if relPath == rf.relPath { - return rf, true - } - } + for _, rf := range self.remote { + if relPath == rf.relPath { + return rf, true + } + } - return nil, false + return nil, false } func (self *syncFiles) findLocalByPath(relPath string) (*LocalFile, bool) { - for _, lf := range self.local { - if relPath == lf.relPath { - return lf, true - } - } + for _, lf := range self.local { + if relPath == lf.relPath { + return lf, true + } + } - return nil, false + return nil, false } func findLocalConflicts(files []*changedFile) []*changedFile { - var conflicts []*changedFile + var conflicts []*changedFile - for _, cf := range files { - if cf.compareModTime() == LocalLastModified { - conflicts = append(conflicts, cf) - } - } + for _, cf := range files { + if cf.compareModTime() == LocalLastModified { + conflicts = append(conflicts, cf) + } + } - return conflicts + return conflicts } func findRemoteConflicts(files []*changedFile) []*changedFile { - var conflicts []*changedFile + var conflicts []*changedFile - for _, cf := range files { - if cf.compareModTime() == RemoteLastModified { - conflicts = append(conflicts, cf) - } - } + for _, cf := range files { + if cf.compareModTime() == RemoteLastModified { + conflicts = append(conflicts, cf) + } + } - return conflicts + return conflicts } type byLocalPathLength []*LocalFile func (self byLocalPathLength) Len() int { - return len(self) + return len(self) } func (self byLocalPathLength) Swap(i, j int) { - self[i], self[j] = self[j], self[i] + self[i], self[j] = self[j], self[i] } func (self byLocalPathLength) Less(i, j int) bool { - return pathLength(self[i].relPath) < pathLength(self[j].relPath) + return pathLength(self[i].relPath) < pathLength(self[j].relPath) } type byRemotePathLength []*RemoteFile func (self byRemotePathLength) Len() int { - return len(self) + return len(self) } func (self byRemotePathLength) Swap(i, j int) { - self[i], self[j] = self[j], self[i] + self[i], self[j] = self[j], self[i] } func (self byRemotePathLength) Less(i, j int) bool { - return pathLength(self[i].relPath) < pathLength(self[j].relPath) + return pathLength(self[i].relPath) < pathLength(self[j].relPath) } type byRemotePath []*RemoteFile func (self byRemotePath) Len() int { - return len(self) + return len(self) } func (self byRemotePath) Swap(i, j int) { - self[i], self[j] = self[j], self[i] + self[i], self[j] = self[j], self[i] } func (self byRemotePath) Less(i, j int) bool { - return strings.ToLower(self[i].relPath) < strings.ToLower(self[j].relPath) + return strings.ToLower(self[i].relPath) < strings.ToLower(self[j].relPath) } type ignoreFunc func(string) bool func prepareIgnorer(path string) (ignoreFunc, error) { - acceptAll := func(string) bool { - return false - } + acceptAll := func(string) bool { + return false + } - if 
!fileExists(path) { - return acceptAll, nil - } + if !fileExists(path) { + return acceptAll, nil + } - ignorer, err := ignore.CompileIgnoreFile(path) - if err != nil { - return acceptAll, fmt.Errorf("Failed to prepare ignorer: %s", err) - } + ignorer, err := ignore.CompileIgnoreFile(path) + if err != nil { + return acceptAll, fmt.Errorf("Failed to prepare ignorer: %s", err) + } - return ignorer.MatchesPath, nil + return ignorer.MatchesPath, nil } func formatConflicts(conflicts []*changedFile, out io.Writer) { - w := new(tabwriter.Writer) - w.Init(out, 0, 0, 3, ' ', 0) - - fmt.Fprintln(w, "Path\tSize Local\tSize Remote\tModified Local\tModified Remote") - - for _, cf := range conflicts { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", - truncateString(cf.local.relPath, 60), - formatSize(cf.local.Size(), false), - formatSize(cf.remote.Size(), false), - cf.local.Modified().Local().Format("Jan _2 2006 15:04:05.000"), - cf.remote.Modified().Local().Format("Jan _2 2006 15:04:05.000"), - ) - } - - w.Flush() + w := new(tabwriter.Writer) + w.Init(out, 0, 0, 3, ' ', 0) + + fmt.Fprintln(w, "Path\tSize Local\tSize Remote\tModified Local\tModified Remote") + + for _, cf := range conflicts { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + truncateString(cf.local.relPath, 60), + formatSize(cf.local.Size(), false), + formatSize(cf.remote.Size(), false), + cf.local.Modified().Local().Format("Jan _2 2006 15:04:05.000"), + cf.remote.Modified().Local().Format("Jan _2 2006 15:04:05.000"), + ) + } + + w.Flush() } diff --git a/drive/sync_download.go b/drive/sync_download.go index 4d84eeaa..04b50b95 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -1,325 +1,325 @@ package drive import ( - "fmt" - "io" - "os" - "sort" - "time" - "bytes" - "path/filepath" - "google.golang.org/api/googleapi" - "google.golang.org/api/drive/v3" + "bytes" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "os" + "path/filepath" + "sort" + "time" ) type DownloadSyncArgs struct { - Out io.Writer - Progress io.Writer - RootId string - Path string - DryRun bool - DeleteExtraneous bool - Resolution ConflictResolution - Comparer FileComparer + Out io.Writer + Progress io.Writer + RootId string + Path string + DryRun bool + DeleteExtraneous bool + Resolution ConflictResolution + Comparer FileComparer } func (self *Drive) DownloadSync(args DownloadSyncArgs) error { - fmt.Fprintln(args.Out, "Starting sync...") - started := time.Now() - - // Get remote root dir - rootDir, err := self.getSyncRoot(args.RootId) - if err != nil { - return err - } - - fmt.Fprintln(args.Out, "Collecting file information...") - files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer) - if err != nil { - return err - } - - // Find changed files - changedFiles := files.filterChangedRemoteFiles() - - fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) - - // Ensure that we don't overwrite any local changes - if args.Resolution == NoResolution { - err = ensureNoLocalModifications(changedFiles) - if err != nil { - return fmt.Errorf("Conflict detected!\nThe following files have changed and the local file are newer than it's remote counterpart:\n\n%s\nNo conflict resolution was given, aborting...", err) - } - } - - // Create missing directories - err = self.createMissingLocalDirs(files, args) - if err != nil { - return err - } - - // Download missing files - err = self.downloadMissingFiles(files, args) - if err != nil { - return err - } - - // Download files that has 
changed - err = self.downloadChangedFiles(changedFiles, args) - if err != nil { - return err - } - - // Delete extraneous local files - if args.DeleteExtraneous { - err = self.deleteExtraneousLocalFiles(files, args) - if err != nil { - return err - } - } - fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started)) - - return nil + fmt.Fprintln(args.Out, "Starting sync...") + started := time.Now() + + // Get remote root dir + rootDir, err := self.getSyncRoot(args.RootId) + if err != nil { + return err + } + + fmt.Fprintln(args.Out, "Collecting file information...") + files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer) + if err != nil { + return err + } + + // Find changed files + changedFiles := files.filterChangedRemoteFiles() + + fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) + + // Ensure that we don't overwrite any local changes + if args.Resolution == NoResolution { + err = ensureNoLocalModifications(changedFiles) + if err != nil { + return fmt.Errorf("Conflict detected!\nThe following files have changed and the local file are newer than it's remote counterpart:\n\n%s\nNo conflict resolution was given, aborting...", err) + } + } + + // Create missing directories + err = self.createMissingLocalDirs(files, args) + if err != nil { + return err + } + + // Download missing files + err = self.downloadMissingFiles(files, args) + if err != nil { + return err + } + + // Download files that has changed + err = self.downloadChangedFiles(changedFiles, args) + if err != nil { + return err + } + + // Delete extraneous local files + if args.DeleteExtraneous { + err = self.deleteExtraneousLocalFiles(files, args) + if err != nil { + return err + } + } + fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started)) + + return nil } func (self *Drive) getSyncRoot(rootId string) (*drive.File, error) { - fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"} - f, err := self.service.Files.Get(rootId).Fields(fields...).Do() - if err != nil { - return nil, fmt.Errorf("Failed to find root dir: %s", err) - } - - // Ensure file is a directory - if !isDir(f) { - return nil, fmt.Errorf("Provided root id is not a directory") - } - - // Ensure directory is a proper syncRoot - if _, ok := f.AppProperties["syncRoot"]; !ok { - return nil, fmt.Errorf("Provided id is not a sync root directory") - } - - return f, nil + fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"} + f, err := self.service.Files.Get(rootId).Fields(fields...).Do() + if err != nil { + return nil, fmt.Errorf("Failed to find root dir: %s", err) + } + + // Ensure file is a directory + if !isDir(f) { + return nil, fmt.Errorf("Provided root id is not a directory") + } + + // Ensure directory is a proper syncRoot + if _, ok := f.AppProperties["syncRoot"]; !ok { + return nil, fmt.Errorf("Provided id is not a sync root directory") + } + + return f, nil } func (self *Drive) createMissingLocalDirs(files *syncFiles, args DownloadSyncArgs) error { - missingDirs := files.filterMissingLocalDirs() - missingCount := len(missingDirs) + missingDirs := files.filterMissingLocalDirs() + missingCount := len(missingDirs) - if missingCount > 0 { - fmt.Fprintf(args.Out, "\n%d local directories are missing\n", missingCount) - } + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d local directories are missing\n", missingCount) + } - // Sort directories so that the dirs with the shortest path comes first - sort.Sort(byRemotePathLength(missingDirs)) + 
// Sort directories so that the dirs with the shortest path comes first + sort.Sort(byRemotePathLength(missingDirs)) - for i, rf := range missingDirs { - absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) - if err != nil { - return fmt.Errorf("Failed to determine local absolute path: %s", err) - } - fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i + 1, missingCount, filepath.Join(filepath.Base(args.Path), rf.relPath)) + for i, rf := range missingDirs { + absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) + if err != nil { + return fmt.Errorf("Failed to determine local absolute path: %s", err) + } + fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i+1, missingCount, filepath.Join(filepath.Base(args.Path), rf.relPath)) - if args.DryRun { - continue - } + if args.DryRun { + continue + } - os.MkdirAll(absPath, 0775) - } + os.MkdirAll(absPath, 0775) + } - return nil + return nil } func (self *Drive) downloadMissingFiles(files *syncFiles, args DownloadSyncArgs) error { - missingFiles := files.filterMissingLocalFiles() - missingCount := len(missingFiles) - - if missingCount > 0 { - fmt.Fprintf(args.Out, "\n%d local files are missing\n", missingCount) - } - - for i, rf := range missingFiles { - absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) - if err != nil { - return fmt.Errorf("Failed to determine local absolute path: %s", err) - } - fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, missingCount, rf.relPath, filepath.Join(filepath.Base(args.Path), rf.relPath)) - - err = self.downloadRemoteFile(rf.file.Id, absPath, args, 0) - if err != nil { - return err - } - } - - return nil + missingFiles := files.filterMissingLocalFiles() + missingCount := len(missingFiles) + + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d local files are missing\n", missingCount) + } + + for i, rf := range missingFiles { + absPath, err := filepath.Abs(filepath.Join(args.Path, rf.relPath)) + if err != nil { + return fmt.Errorf("Failed to determine local absolute path: %s", err) + } + fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i+1, missingCount, rf.relPath, filepath.Join(filepath.Base(args.Path), rf.relPath)) + + err = self.downloadRemoteFile(rf.file.Id, absPath, args, 0) + if err != nil { + return err + } + } + + return nil } func (self *Drive) downloadChangedFiles(changedFiles []*changedFile, args DownloadSyncArgs) error { - changedCount := len(changedFiles) - - if changedCount > 0 { - fmt.Fprintf(args.Out, "\n%d remote files has changed\n", changedCount) - } - - for i, cf := range changedFiles { - if skip, reason := checkLocalConflict(cf, args.Resolution); skip { - fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i + 1, changedCount, cf.remote.relPath, reason) - continue - } - - absPath, err := filepath.Abs(filepath.Join(args.Path, cf.remote.relPath)) - if err != nil { - return fmt.Errorf("Failed to determine local absolute path: %s", err) - } - fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i + 1, changedCount, cf.remote.relPath, filepath.Join(filepath.Base(args.Path), cf.remote.relPath)) - - err = self.downloadRemoteFile(cf.remote.file.Id, absPath, args, 0) - if err != nil { - return err - } - } - - return nil + changedCount := len(changedFiles) + + if changedCount > 0 { + fmt.Fprintf(args.Out, "\n%d remote files has changed\n", changedCount) + } + + for i, cf := range changedFiles { + if skip, reason := checkLocalConflict(cf, args.Resolution); skip { + fmt.Fprintf(args.Out, "[%04d/%04d] 
Skipping %s (%s)\n", i+1, changedCount, cf.remote.relPath, reason) + continue + } + + absPath, err := filepath.Abs(filepath.Join(args.Path, cf.remote.relPath)) + if err != nil { + return fmt.Errorf("Failed to determine local absolute path: %s", err) + } + fmt.Fprintf(args.Out, "[%04d/%04d] Downloading %s -> %s\n", i+1, changedCount, cf.remote.relPath, filepath.Join(filepath.Base(args.Path), cf.remote.relPath)) + + err = self.downloadRemoteFile(cf.remote.file.Id, absPath, args, 0) + if err != nil { + return err + } + } + + return nil } func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, try int) error { - if args.DryRun { - return nil - } - - // Get timeout reader wrapper and context - timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() - - res, err := self.service.Files.Get(id).Context(ctx).Download() - if err != nil { - if isBackendError(err) && try < MaxBackendErrorRetries { - exponentialBackoffSleep(try) - try++ - return self.downloadRemoteFile(id, fpath, args, try) - } else { - return fmt.Errorf("Failed to download file: %s", err) - } - } - - // Close body on function exit - defer res.Body.Close() - - // Wrap response body in progress reader - progressReader := getProgressReader(res.Body, args.Progress, res.ContentLength) - - // Wrap reader in timeout reader - reader := timeoutReaderWrapper(progressReader) - - // Ensure any parent directories exists - if err = mkdir(fpath); err != nil { - return err - } - - // Download to tmp file - tmpPath := fpath + ".incomplete" - - // Create new file - outFile, err := os.Create(tmpPath) - if err != nil { - return fmt.Errorf("Unable to create local file: %s", err) - } - - // Save file to disk - _, err = io.Copy(outFile, reader) - if err != nil { - outFile.Close() - if try < MaxBackendErrorRetries { - exponentialBackoffSleep(try) - try++ - return self.downloadRemoteFile(id, fpath, args, try) - } else { - os.Remove(tmpPath) - return fmt.Errorf("Download was interrupted: %s", err) - } - } - - // Close file - outFile.Close() - - // Rename tmp file to proper filename - return os.Rename(tmpPath, fpath) + if args.DryRun { + return nil + } + + // Get timeout reader wrapper and context + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + + res, err := self.service.Files.Get(id).Context(ctx).Download() + if err != nil { + if isBackendError(err) && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + return self.downloadRemoteFile(id, fpath, args, try) + } else { + return fmt.Errorf("Failed to download file: %s", err) + } + } + + // Close body on function exit + defer res.Body.Close() + + // Wrap response body in progress reader + progressReader := getProgressReader(res.Body, args.Progress, res.ContentLength) + + // Wrap reader in timeout reader + reader := timeoutReaderWrapper(progressReader) + + // Ensure any parent directories exists + if err = mkdir(fpath); err != nil { + return err + } + + // Download to tmp file + tmpPath := fpath + ".incomplete" + + // Create new file + outFile, err := os.Create(tmpPath) + if err != nil { + return fmt.Errorf("Unable to create local file: %s", err) + } + + // Save file to disk + _, err = io.Copy(outFile, reader) + if err != nil { + outFile.Close() + if try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + return self.downloadRemoteFile(id, fpath, args, try) + } else { + os.Remove(tmpPath) + return fmt.Errorf("Download was interrupted: %s", err) + } + } + + // Close file + outFile.Close() + + // Rename tmp file to proper filename + 
return os.Rename(tmpPath, fpath) } func (self *Drive) deleteExtraneousLocalFiles(files *syncFiles, args DownloadSyncArgs) error { - extraneousFiles := files.filterExtraneousLocalFiles() - extraneousCount := len(extraneousFiles) + extraneousFiles := files.filterExtraneousLocalFiles() + extraneousCount := len(extraneousFiles) - if extraneousCount > 0 { - fmt.Fprintf(args.Out, "\n%d local files are extraneous\n", extraneousCount) - } + if extraneousCount > 0 { + fmt.Fprintf(args.Out, "\n%d local files are extraneous\n", extraneousCount) + } - // Sort files so that the files with the longest path comes first - sort.Sort(sort.Reverse(byLocalPathLength(extraneousFiles))) + // Sort files so that the files with the longest path comes first + sort.Sort(sort.Reverse(byLocalPathLength(extraneousFiles))) - for i, lf := range extraneousFiles { - fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, lf.absPath) + for i, lf := range extraneousFiles { + fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i+1, extraneousCount, lf.absPath) - if args.DryRun { - continue - } + if args.DryRun { + continue + } - err := os.Remove(lf.absPath) - if err != nil { - return fmt.Errorf("Failed to delete local file: %s", err) - } - } + err := os.Remove(lf.absPath) + if err != nil { + return fmt.Errorf("Failed to delete local file: %s", err) + } + } - return nil + return nil } func checkLocalConflict(cf *changedFile, resolution ConflictResolution) (bool, string) { - // No conflict unless local file was last modified - if cf.compareModTime() != LocalLastModified { - return false, "" - } - - // Don't skip if want to keep the remote file - if resolution == KeepRemote { - return false, "" - } - - // Skip if we want to keep the local file - if resolution == KeepLocal { - return true, "conflicting file, keeping local file" - } - - if resolution == KeepLargest { - largest := cf.compareSize() - - // Skip if the local file is largest - if largest == LocalLargestSize { - return true, "conflicting file, local file is largest, keeping local" - } - - // Don't skip if the remote file is largest - if largest == RemoteLargestSize { - return false, "" - } - - // Keep local if both files have the same size - if largest == EqualSize { - return true, "conflicting file, file sizes are equal, keeping local" - } - } - - // The conditionals above should cover all cases, - // unless the programmer did something wrong, - // in which case we default to being non-destructive and skip the file - return true, "conflicting file, unhandled case" + // No conflict unless local file was last modified + if cf.compareModTime() != LocalLastModified { + return false, "" + } + + // Don't skip if want to keep the remote file + if resolution == KeepRemote { + return false, "" + } + + // Skip if we want to keep the local file + if resolution == KeepLocal { + return true, "conflicting file, keeping local file" + } + + if resolution == KeepLargest { + largest := cf.compareSize() + + // Skip if the local file is largest + if largest == LocalLargestSize { + return true, "conflicting file, local file is largest, keeping local" + } + + // Don't skip if the remote file is largest + if largest == RemoteLargestSize { + return false, "" + } + + // Keep local if both files have the same size + if largest == EqualSize { + return true, "conflicting file, file sizes are equal, keeping local" + } + } + + // The conditionals above should cover all cases, + // unless the programmer did something wrong, + // in which case we default to being non-destructive and 
skip the file + return true, "conflicting file, unhandled case" } func ensureNoLocalModifications(files []*changedFile) error { - conflicts := findLocalConflicts(files) - if len(conflicts) == 0 { - return nil - } - - buffer := bytes.NewBufferString("") - formatConflicts(conflicts, buffer) - return fmt.Errorf(buffer.String()) + conflicts := findLocalConflicts(files) + if len(conflicts) == 0 { + return nil + } + + buffer := bytes.NewBufferString("") + formatConflicts(conflicts, buffer) + return fmt.Errorf(buffer.String()) } diff --git a/drive/sync_list.go b/drive/sync_list.go index e0352397..c9b84fb1 100644 --- a/drive/sync_list.go +++ b/drive/sync_list.go @@ -1,97 +1,97 @@ package drive import ( - "fmt" - "sort" - "io" - "text/tabwriter" - "google.golang.org/api/googleapi" - "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "sort" + "text/tabwriter" ) type ListSyncArgs struct { - Out io.Writer - SkipHeader bool + Out io.Writer + SkipHeader bool } func (self *Drive) ListSync(args ListSyncArgs) error { - listArgs := listAllFilesArgs{ - query: "appProperties has {key='syncRoot' and value='true'}", - fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,createdTime)"}, - } - files, err := self.listAllFiles(listArgs) - if err != nil { - return err - } - printSyncDirectories(files, args) - return nil + listArgs := listAllFilesArgs{ + query: "appProperties has {key='syncRoot' and value='true'}", + fields: []googleapi.Field{"nextPageToken", "files(id,name,mimeType,createdTime)"}, + } + files, err := self.listAllFiles(listArgs) + if err != nil { + return err + } + printSyncDirectories(files, args) + return nil } type ListRecursiveSyncArgs struct { - Out io.Writer - RootId string - SkipHeader bool - PathWidth int64 - SizeInBytes bool - SortOrder string + Out io.Writer + RootId string + SkipHeader bool + PathWidth int64 + SizeInBytes bool + SortOrder string } func (self *Drive) ListRecursiveSync(args ListRecursiveSyncArgs) error { - rootDir, err := self.getSyncRoot(args.RootId) - if err != nil { - return err - } - - files, err := self.prepareRemoteFiles(rootDir, args.SortOrder) - if err != nil { - return err - } - - printSyncDirContent(files, args) - return nil + rootDir, err := self.getSyncRoot(args.RootId) + if err != nil { + return err + } + + files, err := self.prepareRemoteFiles(rootDir, args.SortOrder) + if err != nil { + return err + } + + printSyncDirContent(files, args) + return nil } func printSyncDirectories(files []*drive.File, args ListSyncArgs) { - w := new(tabwriter.Writer) - w.Init(args.Out, 0, 0, 3, ' ', 0) - - if !args.SkipHeader { - fmt.Fprintln(w, "Id\tName\tCreated") - } - - for _, f := range files { - fmt.Fprintf(w, "%s\t%s\t%s\n", - f.Id, - f.Name, - formatDatetime(f.CreatedTime), - ) - } - - w.Flush() + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tName\tCreated") + } + + for _, f := range files { + fmt.Fprintf(w, "%s\t%s\t%s\n", + f.Id, + f.Name, + formatDatetime(f.CreatedTime), + ) + } + + w.Flush() } func printSyncDirContent(files []*RemoteFile, args ListRecursiveSyncArgs) { - if args.SortOrder == "" { - // Sort files by path - sort.Sort(byRemotePath(files)) - } - - w := new(tabwriter.Writer) - w.Init(args.Out, 0, 0, 3, ' ', 0) - - if !args.SkipHeader { - fmt.Fprintln(w, "Id\tPath\tType\tSize\tModified") - } - - for _, rf := range files { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", - rf.file.Id, - truncateString(rf.relPath, 
int(args.PathWidth)), - filetype(rf.file), - formatSize(rf.file.Size, args.SizeInBytes), - formatDatetime(rf.file.ModifiedTime), - ) - } - - w.Flush() + if args.SortOrder == "" { + // Sort files by path + sort.Sort(byRemotePath(files)) + } + + w := new(tabwriter.Writer) + w.Init(args.Out, 0, 0, 3, ' ', 0) + + if !args.SkipHeader { + fmt.Fprintln(w, "Id\tPath\tType\tSize\tModified") + } + + for _, rf := range files { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + rf.file.Id, + truncateString(rf.relPath, int(args.PathWidth)), + filetype(rf.file), + formatSize(rf.file.Size, args.SizeInBytes), + formatDatetime(rf.file.ModifiedTime), + ) + } + + w.Flush() } diff --git a/drive/sync_upload.go b/drive/sync_upload.go index 96442e11..0d5c2085 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -1,476 +1,475 @@ package drive import ( - "fmt" - "io" - "os" - "time" - "sort" - "bytes" - "path/filepath" - "google.golang.org/api/googleapi" - "google.golang.org/api/drive/v3" + "bytes" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "os" + "path/filepath" + "sort" + "time" ) type UploadSyncArgs struct { - Out io.Writer - Progress io.Writer - Path string - RootId string - DryRun bool - DeleteExtraneous bool - ChunkSize int64 - Resolution ConflictResolution - Comparer FileComparer + Out io.Writer + Progress io.Writer + Path string + RootId string + DryRun bool + DeleteExtraneous bool + ChunkSize int64 + Resolution ConflictResolution + Comparer FileComparer } func (self *Drive) UploadSync(args UploadSyncArgs) error { - if args.ChunkSize > intMax() - 1 { - return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) - } - - fmt.Fprintln(args.Out, "Starting sync...") - started := time.Now() - - // Create root directory if it does not exist - rootDir, err := self.prepareSyncRoot(args) - if err != nil { - return err - } - - fmt.Fprintln(args.Out, "Collecting local and remote file information...") - files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer) - if err != nil { - return err - } - - // Find missing and changed files - changedFiles := files.filterChangedLocalFiles() - missingFiles := files.filterMissingRemoteFiles() - - fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) - - // Ensure that there is enough free space on drive - if ok, msg := self.checkRemoteFreeSpace(missingFiles, changedFiles); !ok { - return fmt.Errorf(msg) - } - - // Ensure that we don't overwrite any remote changes - if args.Resolution == NoResolution { - err = ensureNoRemoteModifications(changedFiles) - if err != nil { - return fmt.Errorf("Conflict detected!\nThe following files have changed and the remote file are newer than it's local counterpart:\n\n%s\nNo conflict resolution was given, aborting...", err) - } - } - - // Create missing directories - files, err = self.createMissingRemoteDirs(files, args) - if err != nil { - return err - } - - // Upload missing files - err = self.uploadMissingFiles(missingFiles, files, args) - if err != nil { - return err - } - - // Update modified files - err = self.updateChangedFiles(changedFiles, rootDir, args) - if err != nil { - return err - } - - // Delete extraneous files on drive - if args.DeleteExtraneous { - err = self.deleteExtraneousRemoteFiles(files, args) - if err != nil { - return err - } - } - fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started)) - - return nil + if args.ChunkSize > intMax()-1 { + return fmt.Errorf("Chunk 
size is to big, max chunk size for this computer is %d", intMax()-1) + } + + fmt.Fprintln(args.Out, "Starting sync...") + started := time.Now() + + // Create root directory if it does not exist + rootDir, err := self.prepareSyncRoot(args) + if err != nil { + return err + } + + fmt.Fprintln(args.Out, "Collecting local and remote file information...") + files, err := self.prepareSyncFiles(args.Path, rootDir, args.Comparer) + if err != nil { + return err + } + + // Find missing and changed files + changedFiles := files.filterChangedLocalFiles() + missingFiles := files.filterMissingRemoteFiles() + + fmt.Fprintf(args.Out, "Found %d local files and %d remote files\n", len(files.local), len(files.remote)) + + // Ensure that there is enough free space on drive + if ok, msg := self.checkRemoteFreeSpace(missingFiles, changedFiles); !ok { + return fmt.Errorf(msg) + } + + // Ensure that we don't overwrite any remote changes + if args.Resolution == NoResolution { + err = ensureNoRemoteModifications(changedFiles) + if err != nil { + return fmt.Errorf("Conflict detected!\nThe following files have changed and the remote file are newer than it's local counterpart:\n\n%s\nNo conflict resolution was given, aborting...", err) + } + } + + // Create missing directories + files, err = self.createMissingRemoteDirs(files, args) + if err != nil { + return err + } + + // Upload missing files + err = self.uploadMissingFiles(missingFiles, files, args) + if err != nil { + return err + } + + // Update modified files + err = self.updateChangedFiles(changedFiles, rootDir, args) + if err != nil { + return err + } + + // Delete extraneous files on drive + if args.DeleteExtraneous { + err = self.deleteExtraneousRemoteFiles(files, args) + if err != nil { + return err + } + } + fmt.Fprintf(args.Out, "Sync finished in %s\n", time.Since(started)) + + return nil } func (self *Drive) prepareSyncRoot(args UploadSyncArgs) (*drive.File, error) { - fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"} - f, err := self.service.Files.Get(args.RootId).Fields(fields...).Do() - if err != nil { - return nil, fmt.Errorf("Failed to find root dir: %s", err) - } - - // Ensure file is a directory - if !isDir(f) { - return nil, fmt.Errorf("Provided root id is not a directory") - } - - // Return directory if syncRoot property is already set - if _, ok := f.AppProperties["syncRoot"]; ok { - return f, nil - } - - // This is the first time this directory have been used for sync - // Check if the directory is empty - isEmpty, err := self.dirIsEmpty(f.Id) - if err != nil { - return nil, fmt.Errorf("Failed to check if root dir is empty: %s", err) - } - - // Ensure that the directory is empty - if !isEmpty { - return nil, fmt.Errorf("Root directoy is not empty, the initial sync requires an empty directory") - } - - // Update directory with syncRoot property - dstFile := &drive.File{ - AppProperties: map[string]string{"sync": "true", "syncRoot": "true"}, - } - - f, err = self.service.Files.Update(f.Id, dstFile).Fields(fields...).Do() - if err != nil { - return nil, fmt.Errorf("Failed to update root directory: %s", err) - } - - return f, nil + fields := []googleapi.Field{"id", "name", "mimeType", "appProperties"} + f, err := self.service.Files.Get(args.RootId).Fields(fields...).Do() + if err != nil { + return nil, fmt.Errorf("Failed to find root dir: %s", err) + } + + // Ensure file is a directory + if !isDir(f) { + return nil, fmt.Errorf("Provided root id is not a directory") + } + + // Return directory if syncRoot property is already set 
+ if _, ok := f.AppProperties["syncRoot"]; ok { + return f, nil + } + + // This is the first time this directory have been used for sync + // Check if the directory is empty + isEmpty, err := self.dirIsEmpty(f.Id) + if err != nil { + return nil, fmt.Errorf("Failed to check if root dir is empty: %s", err) + } + + // Ensure that the directory is empty + if !isEmpty { + return nil, fmt.Errorf("Root directoy is not empty, the initial sync requires an empty directory") + } + + // Update directory with syncRoot property + dstFile := &drive.File{ + AppProperties: map[string]string{"sync": "true", "syncRoot": "true"}, + } + + f, err = self.service.Files.Update(f.Id, dstFile).Fields(fields...).Do() + if err != nil { + return nil, fmt.Errorf("Failed to update root directory: %s", err) + } + + return f, nil } func (self *Drive) createMissingRemoteDirs(files *syncFiles, args UploadSyncArgs) (*syncFiles, error) { - missingDirs := files.filterMissingRemoteDirs() - missingCount := len(missingDirs) - - if missingCount > 0 { - fmt.Fprintf(args.Out, "\n%d remote directories are missing\n", missingCount) - } - - // Sort directories so that the dirs with the shortest path comes first - sort.Sort(byLocalPathLength(missingDirs)) - - for i, lf := range missingDirs { - parentPath := parentFilePath(lf.relPath) - parent, ok := files.findRemoteByPath(parentPath) - if !ok { - return nil, fmt.Errorf("Could not find remote directory with path '%s'", parentPath) - } - - fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i + 1, missingCount, filepath.Join(files.root.file.Name, lf.relPath)) - - f, err := self.createMissingRemoteDir(createMissingRemoteDirArgs{ - name: lf.info.Name(), - parentId: parent.file.Id, - rootId: args.RootId, - dryRun: args.DryRun, - try: 0, - }) - if err != nil { - return nil, err - } - - files.remote = append(files.remote, &RemoteFile{ - relPath: lf.relPath, - file: f, - }) - } - - return files, nil + missingDirs := files.filterMissingRemoteDirs() + missingCount := len(missingDirs) + + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d remote directories are missing\n", missingCount) + } + + // Sort directories so that the dirs with the shortest path comes first + sort.Sort(byLocalPathLength(missingDirs)) + + for i, lf := range missingDirs { + parentPath := parentFilePath(lf.relPath) + parent, ok := files.findRemoteByPath(parentPath) + if !ok { + return nil, fmt.Errorf("Could not find remote directory with path '%s'", parentPath) + } + + fmt.Fprintf(args.Out, "[%04d/%04d] Creating directory %s\n", i+1, missingCount, filepath.Join(files.root.file.Name, lf.relPath)) + + f, err := self.createMissingRemoteDir(createMissingRemoteDirArgs{ + name: lf.info.Name(), + parentId: parent.file.Id, + rootId: args.RootId, + dryRun: args.DryRun, + try: 0, + }) + if err != nil { + return nil, err + } + + files.remote = append(files.remote, &RemoteFile{ + relPath: lf.relPath, + file: f, + }) + } + + return files, nil } type createMissingRemoteDirArgs struct { - name string - parentId string - rootId string - dryRun bool - try int + name string + parentId string + rootId string + dryRun bool + try int } func (self *Drive) uploadMissingFiles(missingFiles []*LocalFile, files *syncFiles, args UploadSyncArgs) error { - missingCount := len(missingFiles) + missingCount := len(missingFiles) - if missingCount > 0 { - fmt.Fprintf(args.Out, "\n%d remote files are missing\n", missingCount) - } + if missingCount > 0 { + fmt.Fprintf(args.Out, "\n%d remote files are missing\n", missingCount) + } - for i, lf := range 
missingFiles { - parentPath := parentFilePath(lf.relPath) - parent, ok := files.findRemoteByPath(parentPath) - if !ok { - return fmt.Errorf("Could not find remote directory with path '%s'", parentPath) - } + for i, lf := range missingFiles { + parentPath := parentFilePath(lf.relPath) + parent, ok := files.findRemoteByPath(parentPath) + if !ok { + return fmt.Errorf("Could not find remote directory with path '%s'", parentPath) + } - fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i + 1, missingCount, lf.relPath, filepath.Join(files.root.file.Name, lf.relPath)) + fmt.Fprintf(args.Out, "[%04d/%04d] Uploading %s -> %s\n", i+1, missingCount, lf.relPath, filepath.Join(files.root.file.Name, lf.relPath)) - err := self.uploadMissingFile(parent.file.Id, lf, args, 0) - if err != nil { - return err - } - } + err := self.uploadMissingFile(parent.file.Id, lf, args, 0) + if err != nil { + return err + } + } - return nil + return nil } func (self *Drive) updateChangedFiles(changedFiles []*changedFile, root *drive.File, args UploadSyncArgs) error { - changedCount := len(changedFiles) + changedCount := len(changedFiles) - if changedCount > 0 { - fmt.Fprintf(args.Out, "\n%d local files has changed\n", changedCount) - } + if changedCount > 0 { + fmt.Fprintf(args.Out, "\n%d local files has changed\n", changedCount) + } - for i, cf := range changedFiles { - if skip, reason := checkRemoteConflict(cf, args.Resolution); skip { - fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i + 1, changedCount, cf.local.relPath, reason) - continue - } + for i, cf := range changedFiles { + if skip, reason := checkRemoteConflict(cf, args.Resolution); skip { + fmt.Fprintf(args.Out, "[%04d/%04d] Skipping %s (%s)\n", i+1, changedCount, cf.local.relPath, reason) + continue + } - fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i + 1, changedCount, cf.local.relPath, filepath.Join(root.Name, cf.local.relPath)) + fmt.Fprintf(args.Out, "[%04d/%04d] Updating %s -> %s\n", i+1, changedCount, cf.local.relPath, filepath.Join(root.Name, cf.local.relPath)) - err := self.updateChangedFile(cf, args, 0) - if err != nil { - return err - } - } + err := self.updateChangedFile(cf, args, 0) + if err != nil { + return err + } + } - return nil + return nil } func (self *Drive) deleteExtraneousRemoteFiles(files *syncFiles, args UploadSyncArgs) error { - extraneousFiles := files.filterExtraneousRemoteFiles() - extraneousCount := len(extraneousFiles) + extraneousFiles := files.filterExtraneousRemoteFiles() + extraneousCount := len(extraneousFiles) - if extraneousCount > 0 { - fmt.Fprintf(args.Out, "\n%d remote files are extraneous\n", extraneousCount) - } + if extraneousCount > 0 { + fmt.Fprintf(args.Out, "\n%d remote files are extraneous\n", extraneousCount) + } - // Sort files so that the files with the longest path comes first - sort.Sort(sort.Reverse(byRemotePathLength(extraneousFiles))) + // Sort files so that the files with the longest path comes first + sort.Sort(sort.Reverse(byRemotePathLength(extraneousFiles))) - for i, rf := range extraneousFiles { - fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i + 1, extraneousCount, filepath.Join(files.root.file.Name, rf.relPath)) + for i, rf := range extraneousFiles { + fmt.Fprintf(args.Out, "[%04d/%04d] Deleting %s\n", i+1, extraneousCount, filepath.Join(files.root.file.Name, rf.relPath)) - err := self.deleteRemoteFile(rf, args, 0) - if err != nil { - return err - } - } + err := self.deleteRemoteFile(rf, args, 0) + if err != nil { + return err + } + } - return nil + return nil } 
func (self *Drive) createMissingRemoteDir(args createMissingRemoteDirArgs) (*drive.File, error) { - dstFile := &drive.File{ - Name: args.name, - MimeType: DirectoryMimeType, - Parents: []string{args.parentId}, - AppProperties: map[string]string{"sync": "true", "syncRootId": args.rootId}, - } - - if args.dryRun { - return dstFile, nil - } - - f, err := self.service.Files.Create(dstFile).Do() - if err != nil { - if isBackendError(err) && args.try < MaxBackendErrorRetries { - exponentialBackoffSleep(args.try) - args.try++ - return self.createMissingRemoteDir(args) - } else { - return nil, fmt.Errorf("Failed to create directory: %s", err) - } - } - - return f, nil + dstFile := &drive.File{ + Name: args.name, + MimeType: DirectoryMimeType, + Parents: []string{args.parentId}, + AppProperties: map[string]string{"sync": "true", "syncRootId": args.rootId}, + } + + if args.dryRun { + return dstFile, nil + } + + f, err := self.service.Files.Create(dstFile).Do() + if err != nil { + if isBackendError(err) && args.try < MaxBackendErrorRetries { + exponentialBackoffSleep(args.try) + args.try++ + return self.createMissingRemoteDir(args) + } else { + return nil, fmt.Errorf("Failed to create directory: %s", err) + } + } + + return f, nil } func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args UploadSyncArgs, try int) error { - if args.DryRun { - return nil - } - - srcFile, err := os.Open(lf.absPath) - if err != nil { - return fmt.Errorf("Failed to open file: %s", err) - } - - // Close file on function exit - defer srcFile.Close() - - // Instantiate drive file - dstFile := &drive.File{ - Name: lf.info.Name(), - Parents: []string{parentId}, - AppProperties: map[string]string{"sync": "true", "syncRootId": args.RootId}, - } - - // Chunk size option - chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - - // Wrap file in progress reader - progressReader := getProgressReader(srcFile, args.Progress, lf.info.Size()) - - // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) - - _, err = self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Context(ctx).Media(reader, chunkSize).Do() - if err != nil { - if isBackendError(err) && try < MaxBackendErrorRetries { - exponentialBackoffSleep(try) - try++ - return self.uploadMissingFile(parentId, lf, args, try) - } else { - return fmt.Errorf("Failed to upload file: %s", err) - } - } - - return nil + if args.DryRun { + return nil + } + + srcFile, err := os.Open(lf.absPath) + if err != nil { + return fmt.Errorf("Failed to open file: %s", err) + } + + // Close file on function exit + defer srcFile.Close() + + // Instantiate drive file + dstFile := &drive.File{ + Name: lf.info.Name(), + Parents: []string{parentId}, + AppProperties: map[string]string{"sync": "true", "syncRootId": args.RootId}, + } + + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + + // Wrap file in progress reader + progressReader := getProgressReader(srcFile, args.Progress, lf.info.Size()) + + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) + + _, err = self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Context(ctx).Media(reader, chunkSize).Do() + if err != nil { + if isBackendError(err) && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + return self.uploadMissingFile(parentId, lf, args, try) + } else { + return fmt.Errorf("Failed to upload file: %s", err) + } + } + + return nil } func (self *Drive) 
updateChangedFile(cf *changedFile, args UploadSyncArgs, try int) error { - if args.DryRun { - return nil - } - - srcFile, err := os.Open(cf.local.absPath) - if err != nil { - return fmt.Errorf("Failed to open file: %s", err) - } - - // Close file on function exit - defer srcFile.Close() - - // Instantiate drive file - dstFile := &drive.File{} - - // Chunk size option - chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - - // Wrap file in progress reader - progressReader := getProgressReader(srcFile, args.Progress, cf.local.info.Size()) - - // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) - - _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Context(ctx).Media(reader, chunkSize).Do() - if err != nil { - if isBackendError(err) && try < MaxBackendErrorRetries { - exponentialBackoffSleep(try) - try++ - return self.updateChangedFile(cf, args, try) - } else { - return fmt.Errorf("Failed to update file: %s", err) - } - } - - return nil + if args.DryRun { + return nil + } + + srcFile, err := os.Open(cf.local.absPath) + if err != nil { + return fmt.Errorf("Failed to open file: %s", err) + } + + // Close file on function exit + defer srcFile.Close() + + // Instantiate drive file + dstFile := &drive.File{} + + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + + // Wrap file in progress reader + progressReader := getProgressReader(srcFile, args.Progress, cf.local.info.Size()) + + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) + + _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Context(ctx).Media(reader, chunkSize).Do() + if err != nil { + if isBackendError(err) && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + return self.updateChangedFile(cf, args, try) + } else { + return fmt.Errorf("Failed to update file: %s", err) + } + } + + return nil } func (self *Drive) deleteRemoteFile(rf *RemoteFile, args UploadSyncArgs, try int) error { - if args.DryRun { - return nil - } - - - err := self.service.Files.Delete(rf.file.Id).Do() - if err != nil { - if isBackendError(err) && try < MaxBackendErrorRetries { - exponentialBackoffSleep(try) - try++ - return self.deleteRemoteFile(rf, args, try) - } else { - return fmt.Errorf("Failed to delete file: %s", err) - } - } - - return nil + if args.DryRun { + return nil + } + + err := self.service.Files.Delete(rf.file.Id).Do() + if err != nil { + if isBackendError(err) && try < MaxBackendErrorRetries { + exponentialBackoffSleep(try) + try++ + return self.deleteRemoteFile(rf, args, try) + } else { + return fmt.Errorf("Failed to delete file: %s", err) + } + } + + return nil } func (self *Drive) dirIsEmpty(id string) (bool, error) { - query := fmt.Sprintf("'%s' in parents", id) - fileList, err := self.service.Files.List().Q(query).Do() - if err != nil { - return false, fmt.Errorf("Empty dir check failed: ", err) - } + query := fmt.Sprintf("'%s' in parents", id) + fileList, err := self.service.Files.List().Q(query).Do() + if err != nil { + return false, fmt.Errorf("Empty dir check failed: ", err) + } - return len(fileList.Files) == 0, nil + return len(fileList.Files) == 0, nil } func checkRemoteConflict(cf *changedFile, resolution ConflictResolution) (bool, string) { - // No conflict unless remote file was last modified - if cf.compareModTime() != RemoteLastModified { - return false, "" - } - - // Don't skip if want to keep the local file - if resolution == KeepLocal { - return false, "" - } - - // Skip if we 
want to keep the remote file - if resolution == KeepRemote { - return true, "conflicting file, keeping remote file" - } - - if resolution == KeepLargest { - largest := cf.compareSize() - - // Skip if the remote file is largest - if largest == RemoteLargestSize { - return true, "conflicting file, remote file is largest, keeping remote" - } - - // Don't skip if the local file is largest - if largest == LocalLargestSize { - return false, "" - } - - // Keep remote if both files have the same size - if largest == EqualSize { - return true, "conflicting file, file sizes are equal, keeping remote" - } - } - - // The conditionals above should cover all cases, - // unless the programmer did something wrong, - // in which case we default to being non-destructive and skip the file - return true, "conflicting file, unhandled case" + // No conflict unless remote file was last modified + if cf.compareModTime() != RemoteLastModified { + return false, "" + } + + // Don't skip if want to keep the local file + if resolution == KeepLocal { + return false, "" + } + + // Skip if we want to keep the remote file + if resolution == KeepRemote { + return true, "conflicting file, keeping remote file" + } + + if resolution == KeepLargest { + largest := cf.compareSize() + + // Skip if the remote file is largest + if largest == RemoteLargestSize { + return true, "conflicting file, remote file is largest, keeping remote" + } + + // Don't skip if the local file is largest + if largest == LocalLargestSize { + return false, "" + } + + // Keep remote if both files have the same size + if largest == EqualSize { + return true, "conflicting file, file sizes are equal, keeping remote" + } + } + + // The conditionals above should cover all cases, + // unless the programmer did something wrong, + // in which case we default to being non-destructive and skip the file + return true, "conflicting file, unhandled case" } func ensureNoRemoteModifications(files []*changedFile) error { - conflicts := findRemoteConflicts(files) - if len(conflicts) == 0 { - return nil - } - - buffer := bytes.NewBufferString("") - formatConflicts(conflicts, buffer) - return fmt.Errorf(buffer.String()) + conflicts := findRemoteConflicts(files) + if len(conflicts) == 0 { + return nil + } + + buffer := bytes.NewBufferString("") + formatConflicts(conflicts, buffer) + return fmt.Errorf(buffer.String()) } func (self *Drive) checkRemoteFreeSpace(missingFiles []*LocalFile, changedFiles []*changedFile) (bool, string) { - about, err := self.service.About.Get().Fields("storageQuota").Do() - if err != nil { - return false, fmt.Sprintf("Failed to determine free space: %s", err) - } + about, err := self.service.About.Get().Fields("storageQuota").Do() + if err != nil { + return false, fmt.Sprintf("Failed to determine free space: %s", err) + } - quota := about.StorageQuota - if quota.Limit == 0 { - return true, "" - } + quota := about.StorageQuota + if quota.Limit == 0 { + return true, "" + } - freeSpace := quota.Limit - quota.Usage + freeSpace := quota.Limit - quota.Usage - var totalSize int64 + var totalSize int64 - for _, lf := range missingFiles { - totalSize += lf.Size() - } + for _, lf := range missingFiles { + totalSize += lf.Size() + } - for _, cf := range changedFiles { - totalSize += cf.local.Size() - } + for _, cf := range changedFiles { + totalSize += cf.local.Size() + } - if totalSize > freeSpace { - return false, fmt.Sprintf("Not enough free space, have %s need %s", formatSize(freeSpace, false), formatSize(totalSize, false)) - } + if totalSize > freeSpace { 
+ return false, fmt.Sprintf("Not enough free space, have %s need %s", formatSize(freeSpace, false), formatSize(totalSize, false)) + } - return true, "" + return true, "" } diff --git a/drive/timeout_reader.go b/drive/timeout_reader.go index 878911bd..9930c12c 100644 --- a/drive/timeout_reader.go +++ b/drive/timeout_reader.go @@ -1,10 +1,10 @@ package drive import ( - "io" - "time" - "sync" - "golang.org/x/net/context" + "golang.org/x/net/context" + "io" + "sync" + "time" ) const MaxIdleTimeout = time.Second * 120 @@ -13,89 +13,89 @@ const TimeoutTimerInterval = time.Second * 10 type timeoutReaderWrapper func(io.Reader) io.Reader func getTimeoutReaderWrapperContext() (timeoutReaderWrapper, context.Context) { - ctx, cancel := context.WithCancel(context.TODO()) - wrapper := func(r io.Reader) io.Reader { - return getTimeoutReader(r, cancel) - } - return wrapper, ctx + ctx, cancel := context.WithCancel(context.TODO()) + wrapper := func(r io.Reader) io.Reader { + return getTimeoutReader(r, cancel) + } + return wrapper, ctx } func getTimeoutReaderContext(r io.Reader) (io.Reader, context.Context) { - ctx, cancel := context.WithCancel(context.TODO()) - return getTimeoutReader(r, cancel), ctx + ctx, cancel := context.WithCancel(context.TODO()) + return getTimeoutReader(r, cancel), ctx } func getTimeoutReader(r io.Reader, cancel context.CancelFunc) io.Reader { - return &TimeoutReader{ - reader: r, - cancel: cancel, - mutex: &sync.Mutex{}, - } + return &TimeoutReader{ + reader: r, + cancel: cancel, + mutex: &sync.Mutex{}, + } } type TimeoutReader struct { - reader io.Reader - cancel context.CancelFunc - lastActivity time.Time - timer *time.Timer - mutex *sync.Mutex - done bool + reader io.Reader + cancel context.CancelFunc + lastActivity time.Time + timer *time.Timer + mutex *sync.Mutex + done bool } func (self *TimeoutReader) Read(p []byte) (int, error) { - if self.timer == nil { - self.startTimer() - } + if self.timer == nil { + self.startTimer() + } - self.mutex.Lock() + self.mutex.Lock() - // Read - n, err := self.reader.Read(p) + // Read + n, err := self.reader.Read(p) - self.lastActivity = time.Now() - self.done = (err != nil) + self.lastActivity = time.Now() + self.done = (err != nil) - self.mutex.Unlock() + self.mutex.Unlock() - if self.done { - self.stopTimer() - } + if self.done { + self.stopTimer() + } - return n, err + return n, err } func (self *TimeoutReader) startTimer() { - self.mutex.Lock() - defer self.mutex.Unlock() + self.mutex.Lock() + defer self.mutex.Unlock() - if !self.done { - self.timer = time.AfterFunc(TimeoutTimerInterval, self.timeout) - } + if !self.done { + self.timer = time.AfterFunc(TimeoutTimerInterval, self.timeout) + } } func (self *TimeoutReader) stopTimer() { - self.mutex.Lock() - defer self.mutex.Unlock() + self.mutex.Lock() + defer self.mutex.Unlock() - if self.timer != nil { - self.timer.Stop() - } + if self.timer != nil { + self.timer.Stop() + } } func (self *TimeoutReader) timeout() { - self.mutex.Lock() + self.mutex.Lock() - if self.done { - self.mutex.Unlock() - return - } + if self.done { + self.mutex.Unlock() + return + } - if time.Since(self.lastActivity) > MaxIdleTimeout { - self.cancel() - self.mutex.Unlock() - return - } + if time.Since(self.lastActivity) > MaxIdleTimeout { + self.cancel() + self.mutex.Unlock() + return + } - self.mutex.Unlock() - self.startTimer() + self.mutex.Unlock() + self.startTimer() } diff --git a/drive/update.go b/drive/update.go index 5bdd0408..156eb2f0 100644 --- a/drive/update.go +++ b/drive/update.go @@ -1,75 +1,75 @@ 
package drive import ( - "fmt" - "mime" - "time" - "io" - "path/filepath" - "google.golang.org/api/googleapi" - "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "mime" + "path/filepath" + "time" ) type UpdateArgs struct { - Out io.Writer - Progress io.Writer - Id string - Path string - Name string - Parents []string - Mime string - Recursive bool - ChunkSize int64 + Out io.Writer + Progress io.Writer + Id string + Path string + Name string + Parents []string + Mime string + Recursive bool + ChunkSize int64 } func (self *Drive) Update(args UpdateArgs) error { - srcFile, srcFileInfo, err := openFile(args.Path) - if err != nil { - return fmt.Errorf("Failed to open file: %s", err) - } + srcFile, srcFileInfo, err := openFile(args.Path) + if err != nil { + return fmt.Errorf("Failed to open file: %s", err) + } - defer srcFile.Close() + defer srcFile.Close() - // Instantiate empty drive file - dstFile := &drive.File{} + // Instantiate empty drive file + dstFile := &drive.File{} - // Use provided file name or use filename - if args.Name == "" { - dstFile.Name = filepath.Base(srcFileInfo.Name()) - } else { - dstFile.Name = args.Name - } + // Use provided file name or use filename + if args.Name == "" { + dstFile.Name = filepath.Base(srcFileInfo.Name()) + } else { + dstFile.Name = args.Name + } - // Set provided mime type or get type based on file extension - if args.Mime == "" { - dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) - } else { - dstFile.MimeType = args.Mime - } + // Set provided mime type or get type based on file extension + if args.Mime == "" { + dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) + } else { + dstFile.MimeType = args.Mime + } - // Set parent folders - dstFile.Parents = args.Parents + // Set parent folders + dstFile.Parents = args.Parents - // Chunk size option - chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - // Wrap file in progress reader - progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) + // Wrap file in progress reader + progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) - // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) - fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) - started := time.Now() + fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) + started := time.Now() - f, err := self.service.Files.Update(args.Id, dstFile).Fields("id", "name", "size").Context(ctx).Media(reader, chunkSize).Do() - if err != nil { - return fmt.Errorf("Failed to upload file: %s", err) - } + f, err := self.service.Files.Update(args.Id, dstFile).Fields("id", "name", "size").Context(ctx).Media(reader, chunkSize).Do() + if err != nil { + return fmt.Errorf("Failed to upload file: %s", err) + } - // Calculate average upload rate - rate := calcRate(f.Size, started, time.Now()) + // Calculate average upload rate + rate := calcRate(f.Size, started, time.Now()) - fmt.Fprintf(args.Out, "Updated %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) - return nil + fmt.Fprintf(args.Out, "Updated %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) + return nil } diff --git a/drive/upload.go b/drive/upload.go index 0bbc0147..c42bebdc 100644 --- 
a/drive/upload.go +++ b/drive/upload.go @@ -1,249 +1,249 @@ package drive import ( - "fmt" - "mime" - "os" - "io" - "time" - "path/filepath" - "google.golang.org/api/googleapi" - "google.golang.org/api/drive/v3" + "fmt" + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" + "io" + "mime" + "os" + "path/filepath" + "time" ) type UploadArgs struct { - Out io.Writer - Progress io.Writer - Path string - Name string - Parents []string - Mime string - Recursive bool - Share bool - Delete bool - ChunkSize int64 + Out io.Writer + Progress io.Writer + Path string + Name string + Parents []string + Mime string + Recursive bool + Share bool + Delete bool + ChunkSize int64 } func (self *Drive) Upload(args UploadArgs) error { - if args.ChunkSize > intMax() - 1 { - return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) - } - - // Ensure that none of the parents are sync dirs - for _, parent := range args.Parents { - isSyncDir, err := self.isSyncFile(parent) - if err != nil { - return err - } - - if isSyncDir { - return fmt.Errorf("%s is a sync directory, use 'sync upload' instead", parent) - } - } - - if args.Recursive { - return self.uploadRecursive(args) - } - - info, err := os.Stat(args.Path) - if err != nil { - return fmt.Errorf("Failed stat file: %s", err) - } - - if info.IsDir() { - return fmt.Errorf("'%s' is a directory, use --recursive to upload directories", info.Name()) - } - - f, rate, err := self.uploadFile(args) - if err != nil { - return err - } - fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) - - if args.Share { - err = self.shareAnyoneReader(f.Id) - if err != nil { - return err - } - - fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink) - } - - if args.Delete { - err = os.Remove(args.Path) - if err != nil { - return fmt.Errorf("Failed to delete file: %s", err) - } - fmt.Fprintf(args.Out, "Removed %s\n", args.Path) - } - - return nil + if args.ChunkSize > intMax()-1 { + return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax()-1) + } + + // Ensure that none of the parents are sync dirs + for _, parent := range args.Parents { + isSyncDir, err := self.isSyncFile(parent) + if err != nil { + return err + } + + if isSyncDir { + return fmt.Errorf("%s is a sync directory, use 'sync upload' instead", parent) + } + } + + if args.Recursive { + return self.uploadRecursive(args) + } + + info, err := os.Stat(args.Path) + if err != nil { + return fmt.Errorf("Failed stat file: %s", err) + } + + if info.IsDir() { + return fmt.Errorf("'%s' is a directory, use --recursive to upload directories", info.Name()) + } + + f, rate, err := self.uploadFile(args) + if err != nil { + return err + } + fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) + + if args.Share { + err = self.shareAnyoneReader(f.Id) + if err != nil { + return err + } + + fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink) + } + + if args.Delete { + err = os.Remove(args.Path) + if err != nil { + return fmt.Errorf("Failed to delete file: %s", err) + } + fmt.Fprintf(args.Out, "Removed %s\n", args.Path) + } + + return nil } func (self *Drive) uploadRecursive(args UploadArgs) error { - info, err := os.Stat(args.Path) - if err != nil { - return fmt.Errorf("Failed stat file: %s", err) - } - - if info.IsDir() { - args.Name = "" - return self.uploadDirectory(args) - } else { - _, 
_, err := self.uploadFile(args) - return err - } + info, err := os.Stat(args.Path) + if err != nil { + return fmt.Errorf("Failed stat file: %s", err) + } + + if info.IsDir() { + args.Name = "" + return self.uploadDirectory(args) + } else { + _, _, err := self.uploadFile(args) + return err + } } func (self *Drive) uploadDirectory(args UploadArgs) error { - srcFile, srcFileInfo, err := openFile(args.Path) - if err != nil { - return err - } - - // Close file on function exit - defer srcFile.Close() - - fmt.Fprintf(args.Out, "Creating directory %s\n", srcFileInfo.Name()) - // Make directory on drive - f, err := self.mkdir(MkdirArgs{ - Out: args.Out, - Name: srcFileInfo.Name(), - Parents: args.Parents, - }) - if err != nil { - return err - } - - // Read files from directory - names, err := srcFile.Readdirnames(0) - if err != nil && err != io.EOF { - return fmt.Errorf("Failed reading directory: %s", err) - } - - for _, name := range names { - // Copy args and set new path and parents - newArgs := args - newArgs.Path = filepath.Join(args.Path, name) - newArgs.Parents = []string{f.Id} - - // Upload - err = self.uploadRecursive(newArgs) - if err != nil { - return err - } - } - - return nil + srcFile, srcFileInfo, err := openFile(args.Path) + if err != nil { + return err + } + + // Close file on function exit + defer srcFile.Close() + + fmt.Fprintf(args.Out, "Creating directory %s\n", srcFileInfo.Name()) + // Make directory on drive + f, err := self.mkdir(MkdirArgs{ + Out: args.Out, + Name: srcFileInfo.Name(), + Parents: args.Parents, + }) + if err != nil { + return err + } + + // Read files from directory + names, err := srcFile.Readdirnames(0) + if err != nil && err != io.EOF { + return fmt.Errorf("Failed reading directory: %s", err) + } + + for _, name := range names { + // Copy args and set new path and parents + newArgs := args + newArgs.Path = filepath.Join(args.Path, name) + newArgs.Parents = []string{f.Id} + + // Upload + err = self.uploadRecursive(newArgs) + if err != nil { + return err + } + } + + return nil } func (self *Drive) uploadFile(args UploadArgs) (*drive.File, int64, error) { - srcFile, srcFileInfo, err := openFile(args.Path) - if err != nil { - return nil, 0, err - } + srcFile, srcFileInfo, err := openFile(args.Path) + if err != nil { + return nil, 0, err + } - // Close file on function exit - defer srcFile.Close() + // Close file on function exit + defer srcFile.Close() - // Instantiate empty drive file - dstFile := &drive.File{} + // Instantiate empty drive file + dstFile := &drive.File{} - // Use provided file name or use filename - if args.Name == "" { - dstFile.Name = filepath.Base(srcFileInfo.Name()) - } else { - dstFile.Name = args.Name - } + // Use provided file name or use filename + if args.Name == "" { + dstFile.Name = filepath.Base(srcFileInfo.Name()) + } else { + dstFile.Name = args.Name + } - // Set provided mime type or get type based on file extension - if args.Mime == "" { - dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) - } else { - dstFile.MimeType = args.Mime - } + // Set provided mime type or get type based on file extension + if args.Mime == "" { + dstFile.MimeType = mime.TypeByExtension(filepath.Ext(dstFile.Name)) + } else { + dstFile.MimeType = args.Mime + } - // Set parent folders - dstFile.Parents = args.Parents + // Set parent folders + dstFile.Parents = args.Parents - // Chunk size option - chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - // Wrap 
file in progress reader - progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) + // Wrap file in progress reader + progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) - // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) - fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) - started := time.Now() + fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) + started := time.Now() - f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum", "webContentLink").Context(ctx).Media(reader, chunkSize).Do() - if err != nil { - return nil, 0, fmt.Errorf("Failed to upload file: %s", err) - } + f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum", "webContentLink").Context(ctx).Media(reader, chunkSize).Do() + if err != nil { + return nil, 0, fmt.Errorf("Failed to upload file: %s", err) + } - // Calculate average upload rate - rate := calcRate(f.Size, started, time.Now()) + // Calculate average upload rate + rate := calcRate(f.Size, started, time.Now()) - return f, rate, nil + return f, rate, nil } type UploadStreamArgs struct { - Out io.Writer - In io.Reader - Name string - Parents []string - Mime string - Share bool - ChunkSize int64 - Progress io.Writer + Out io.Writer + In io.Reader + Name string + Parents []string + Mime string + Share bool + ChunkSize int64 + Progress io.Writer } func (self *Drive) UploadStream(args UploadStreamArgs) error { - if args.ChunkSize > intMax() - 1 { - return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax() - 1) - } + if args.ChunkSize > intMax()-1 { + return fmt.Errorf("Chunk size is to big, max chunk size for this computer is %d", intMax()-1) + } - // Instantiate empty drive file - dstFile := &drive.File{Name: args.Name} + // Instantiate empty drive file + dstFile := &drive.File{Name: args.Name} - // Set mime type if provided - if args.Mime != "" { - dstFile.MimeType = args.Mime - } + // Set mime type if provided + if args.Mime != "" { + dstFile.MimeType = args.Mime + } - // Set parent folders - dstFile.Parents = args.Parents + // Set parent folders + dstFile.Parents = args.Parents - // Chunk size option - chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) + // Chunk size option + chunkSize := googleapi.ChunkSize(int(args.ChunkSize)) - // Wrap file in progress reader - progressReader := getProgressReader(args.In, args.Progress, 0) + // Wrap file in progress reader + progressReader := getProgressReader(args.In, args.Progress, 0) - // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) + // Wrap reader in timeout reader + reader, ctx := getTimeoutReaderContext(progressReader) - fmt.Fprintf(args.Out, "Uploading %s\n", dstFile.Name) - started := time.Now() + fmt.Fprintf(args.Out, "Uploading %s\n", dstFile.Name) + started := time.Now() - f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "webContentLink").Context(ctx).Media(reader, chunkSize).Do() - if err != nil { - return fmt.Errorf("Failed to upload file: %s", err) - } + f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "webContentLink").Context(ctx).Media(reader, chunkSize).Do() + if err != nil { + return fmt.Errorf("Failed to upload file: %s", err) + } - // Calculate average upload rate - rate := calcRate(f.Size, started, time.Now()) + // Calculate average 
upload rate + rate := calcRate(f.Size, started, time.Now()) - fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) - if args.Share { - err = self.shareAnyoneReader(f.Id) - if err != nil { - return err - } + fmt.Fprintf(args.Out, "Uploaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(f.Size, false)) + if args.Share { + err = self.shareAnyoneReader(f.Id) + if err != nil { + return err + } - fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink) - } - return nil + fmt.Fprintf(args.Out, "File is readable by anyone at %s\n", f.WebContentLink) + } + return nil } diff --git a/drive/util.go b/drive/util.go index 8891e121..181b9b90 100644 --- a/drive/util.go +++ b/drive/util.go @@ -1,169 +1,169 @@ package drive import ( - "os" - "fmt" - "path/filepath" - "strings" - "strconv" - "unicode/utf8" - "math" - "time" + "fmt" + "math" + "os" + "path/filepath" + "strconv" + "strings" + "time" + "unicode/utf8" ) type kv struct { - key string - value string + key string + value string } func formatList(a []string) string { - return strings.Join(a, ", ") + return strings.Join(a, ", ") } func formatSize(bytes int64, forceBytes bool) string { - if bytes == 0 { - return "" - } + if bytes == 0 { + return "" + } - if forceBytes { - return fmt.Sprintf("%v B", bytes) - } + if forceBytes { + return fmt.Sprintf("%v B", bytes) + } - units := []string{"B", "KB", "MB", "GB", "TB", "PB"} + units := []string{"B", "KB", "MB", "GB", "TB", "PB"} - var i int - value := float64(bytes) + var i int + value := float64(bytes) - for value > 1000 { - value /= 1000 - i++ - } - return fmt.Sprintf("%.1f %s", value, units[i]) + for value > 1000 { + value /= 1000 + i++ + } + return fmt.Sprintf("%.1f %s", value, units[i]) } func calcRate(bytes int64, start, end time.Time) int64 { - seconds := float64(end.Sub(start).Seconds()) - if seconds < 1.0 { - return bytes - } - return round(float64(bytes) / seconds) + seconds := float64(end.Sub(start).Seconds()) + if seconds < 1.0 { + return bytes + } + return round(float64(bytes) / seconds) } func round(n float64) int64 { - if n < 0 { - return int64(math.Ceil(n - 0.5)) - } - return int64(math.Floor(n + 0.5)) + if n < 0 { + return int64(math.Ceil(n - 0.5)) + } + return int64(math.Floor(n + 0.5)) } func formatBool(b bool) string { - return strings.Title(strconv.FormatBool(b)) + return strings.Title(strconv.FormatBool(b)) } func formatDatetime(iso string) string { - t, err := time.Parse(time.RFC3339, iso) - if err != nil { - return iso - } - local := t.Local() - year, month, day := local.Date() - hour, min, sec := local.Clock() - return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec) + t, err := time.Parse(time.RFC3339, iso) + if err != nil { + return iso + } + local := t.Local() + year, month, day := local.Date() + hour, min, sec := local.Clock() + return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec) } // Truncates string to given max length, and inserts ellipsis into // the middle of the string to signify that the string has been truncated func truncateString(str string, maxRunes int) string { - indicator := "..." + indicator := "..." 
- // Number of runes in string - runeCount := utf8.RuneCountInString(str) + // Number of runes in string + runeCount := utf8.RuneCountInString(str) - // Return input string if length of input string is less than max length - // Input string is also returned if max length is less than 9 which is the minmal supported length - if runeCount <= maxRunes || maxRunes < 9 { - return str - } + // Return input string if length of input string is less than max length + // Input string is also returned if max length is less than 9 which is the minmal supported length + if runeCount <= maxRunes || maxRunes < 9 { + return str + } - // Number of remaining runes to be removed - remaining := (runeCount - maxRunes) + utf8.RuneCountInString(indicator) + // Number of remaining runes to be removed + remaining := (runeCount - maxRunes) + utf8.RuneCountInString(indicator) - var truncated string - var skip bool + var truncated string + var skip bool - for leftOffset, char := range str { - rightOffset := runeCount - (leftOffset + remaining) + for leftOffset, char := range str { + rightOffset := runeCount - (leftOffset + remaining) - // Start skipping chars when the left and right offsets are equal - // Or in the case where we wont be able to do an even split: when the left offset is larger than the right offset - if leftOffset == rightOffset || (leftOffset > rightOffset && !skip) { - skip = true - truncated += indicator - } + // Start skipping chars when the left and right offsets are equal + // Or in the case where we wont be able to do an even split: when the left offset is larger than the right offset + if leftOffset == rightOffset || (leftOffset > rightOffset && !skip) { + skip = true + truncated += indicator + } - if skip && remaining > 0 { - // Skip char and decrement the remaining skip counter - remaining-- - continue - } + if skip && remaining > 0 { + // Skip char and decrement the remaining skip counter + remaining-- + continue + } - // Add char to result string - truncated += string(char) - } + // Add char to result string + truncated += string(char) + } - // Return truncated string - return truncated + // Return truncated string + return truncated } func fileExists(path string) bool { - _, err := os.Stat(path) - if err == nil { - return true - } - return false + _, err := os.Stat(path) + if err == nil { + return true + } + return false } func mkdir(path string) error { - dir := filepath.Dir(path) - if fileExists(dir) { - return nil - } - return os.MkdirAll(dir, 0775) + dir := filepath.Dir(path) + if fileExists(dir) { + return nil + } + return os.MkdirAll(dir, 0775) } func intMax() int64 { - return 1 << (strconv.IntSize - 1) - 1 + return 1<<(strconv.IntSize-1) - 1 } func pathLength(path string) int { - return strings.Count(path, string(os.PathSeparator)) + return strings.Count(path, string(os.PathSeparator)) } func parentFilePath(path string) string { - dir, _ := filepath.Split(path) - return filepath.Dir(dir) + dir, _ := filepath.Split(path) + return filepath.Dir(dir) } func pow(x int, y int) int { - f := math.Pow(float64(x), float64(y)) - return int(f) + f := math.Pow(float64(x), float64(y)) + return int(f) } func min(x int, y int) int { - n := math.Min(float64(x), float64(y)) - return int(n) + n := math.Min(float64(x), float64(y)) + return int(n) } func openFile(path string) (*os.File, os.FileInfo, error) { - f, err := os.Open(path) - if err != nil { - return nil, nil, fmt.Errorf("Failed to open file: %s", err) - } + f, err := os.Open(path) + if err != nil { + return nil, nil, fmt.Errorf("Failed to 
open file: %s", err) + } - info, err := f.Stat() - if err != nil { - return nil, nil, fmt.Errorf("Failed getting file metadata: %s", err) - } + info, err := f.Stat() + if err != nil { + return nil, nil, fmt.Errorf("Failed getting file metadata: %s", err) + } - return f, info, nil + return f, info, nil } diff --git a/gdrive.go b/gdrive.go index d9181c63..94e000c4 100644 --- a/gdrive.go +++ b/gdrive.go @@ -1,9 +1,9 @@ package main import ( + "./cli" "fmt" "os" - "./cli" ) const Name = "gdrive" @@ -17,752 +17,752 @@ const DefaultUploadChunkSize = 8 * 1024 * 1024 const DefaultQuery = "trashed = false and 'me' in owners" const DefaultShareRole = "reader" const DefaultShareType = "anyone" -var DefaultConfigDir = GetDefaultConfigDir() +var DefaultConfigDir = GetDefaultConfigDir() func main() { - globalFlags := []cli.Flag{ - cli.StringFlag{ - Name: "configDir", - Patterns: []string{"-c", "--config"}, - Description: fmt.Sprintf("Application path, default: %s", DefaultConfigDir), - DefaultValue: DefaultConfigDir, - }, - cli.StringFlag{ - Name: "refreshToken", - Patterns: []string{"--refresh-token"}, - Description: "Oauth refresh token used to get access token (for advanced users)", - }, - cli.StringFlag{ - Name: "accessToken", - Patterns: []string{"--access-token"}, - Description: "Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users)", - }, - } + globalFlags := []cli.Flag{ + cli.StringFlag{ + Name: "configDir", + Patterns: []string{"-c", "--config"}, + Description: fmt.Sprintf("Application path, default: %s", DefaultConfigDir), + DefaultValue: DefaultConfigDir, + }, + cli.StringFlag{ + Name: "refreshToken", + Patterns: []string{"--refresh-token"}, + Description: "Oauth refresh token used to get access token (for advanced users)", + }, + cli.StringFlag{ + Name: "accessToken", + Patterns: []string{"--access-token"}, + Description: "Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users)", + }, + } - handlers := []*cli.Handler{ - &cli.Handler{ - Pattern: "[global] list [options]", - Description: "List files", - Callback: listHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.IntFlag{ - Name: "maxFiles", - Patterns: []string{"-m", "--max"}, - Description: fmt.Sprintf("Max files to list, default: %d", DefaultMaxFiles), - DefaultValue: DefaultMaxFiles, - }, - cli.StringFlag{ - Name: "query", - Patterns: []string{"-q", "--query"}, - Description: fmt.Sprintf(`Default query: "%s". See https://developers.google.com/drive/search-parameters`, DefaultQuery), - DefaultValue: DefaultQuery, - }, - cli.StringFlag{ - Name: "sortOrder", - Patterns: []string{"--order"}, - Description: "Sort order. 
See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy", - }, - cli.IntFlag{ - Name: "nameWidth", - Patterns: []string{"--name-width"}, - Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), - DefaultValue: DefaultNameWidth, - }, - cli.BoolFlag{ - Name: "absPath", - Patterns: []string{"--absolute"}, - Description: "Show absolute path to file (will only show path from first parent)", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "skipHeader", - Patterns: []string{"--no-header"}, - Description: "Dont print the header", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "sizeInBytes", - Patterns: []string{"--bytes"}, - Description: "Size in bytes", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] download [options] ", - Description: "Download file or directory", - Callback: downloadHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "force", - Patterns: []string{"-f", "--force"}, - Description: "Overwrite existing file", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "recursive", - Patterns: []string{"-r", "--recursive"}, - Description: "Download directory recursively, documents will be skipped", - OmitValue: true, - }, - cli.StringFlag{ - Name: "path", - Patterns: []string{"--path"}, - Description: "Download path", - }, - cli.BoolFlag{ - Name: "delete", - Patterns: []string{"--delete"}, - Description: "Delete remote file when download is successful", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "stdout", - Patterns: []string{"--stdout"}, - Description: "Write file content to stdout", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] download query [options] ", - Description: "Download all files and directories matching query", - Callback: downloadQueryHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "force", - Patterns: []string{"-f", "--force"}, - Description: "Overwrite existing file", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "recursive", - Patterns: []string{"-r", "--recursive"}, - Description: "Download directories recursively, documents will be skipped", - OmitValue: true, - }, - cli.StringFlag{ - Name: "path", - Patterns: []string{"--path"}, - Description: "Download path", - }, - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] upload [options] ", - Description: "Upload file or directory", - Callback: uploadHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "recursive", - Patterns: []string{"-r", "--recursive"}, - Description: "Upload directory recursively", - OmitValue: true, - }, - cli.StringSliceFlag{ - Name: "parent", - Patterns: []string{"-p", "--parent"}, - Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", - }, - cli.StringFlag{ - Name: "name", - Patterns: []string{"--name"}, - Description: "Filename", - }, - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - 
cli.StringFlag{ - Name: "mime", - Patterns: []string{"--mime"}, - Description: "Force mime type", - }, - cli.BoolFlag{ - Name: "share", - Patterns: []string{"--share"}, - Description: "Share file", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "delete", - Patterns: []string{"--delete"}, - Description: "Delete local file when upload is successful", - OmitValue: true, - }, - cli.IntFlag{ - Name: "chunksize", - Patterns: []string{"--chunksize"}, - Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), - DefaultValue: DefaultUploadChunkSize, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] upload - [options] ", - Description: "Upload file from stdin", - Callback: uploadStdinHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.StringSliceFlag{ - Name: "parent", - Patterns: []string{"-p", "--parent"}, - Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", - }, - cli.IntFlag{ - Name: "chunksize", - Patterns: []string{"--chunksize"}, - Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), - DefaultValue: DefaultUploadChunkSize, - }, - cli.StringFlag{ - Name: "mime", - Patterns: []string{"--mime"}, - Description: "Force mime type", - }, - cli.BoolFlag{ - Name: "share", - Patterns: []string{"--share"}, - Description: "Share file", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] update [options] ", - Description: "Update file, this creates a new revision of the file", - Callback: updateHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.StringSliceFlag{ - Name: "parent", - Patterns: []string{"-p", "--parent"}, - Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", - }, - cli.StringFlag{ - Name: "name", - Patterns: []string{"--name"}, - Description: "Filename", - }, - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - cli.StringFlag{ - Name: "mime", - Patterns: []string{"--mime"}, - Description: "Force mime type", - }, - cli.IntFlag{ - Name: "chunksize", - Patterns: []string{"--chunksize"}, - Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), - DefaultValue: DefaultUploadChunkSize, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] info [options] ", - Description: "Show file info", - Callback: infoHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "sizeInBytes", - Patterns: []string{"--bytes"}, - Description: "Show size in bytes", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] mkdir [options] ", - Description: "Create directory", - Callback: mkdirHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.StringSliceFlag{ - Name: "parent", - Patterns: []string{"-p", "--parent"}, - Description: "Parent id of created directory, can be specified multiple times to give many parents", - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] share [options] ", - Description: "Share file or 
directory", - Callback: shareHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.StringFlag{ - Name: "role", - Patterns: []string{"--role"}, - Description: fmt.Sprintf("Share role: owner/writer/commenter/reader, default: %s", DefaultShareRole), - DefaultValue: DefaultShareRole, - }, - cli.StringFlag{ - Name: "type", - Patterns: []string{"--type"}, - Description: fmt.Sprintf("Share type: user/group/domain/anyone, default: %s", DefaultShareType), - DefaultValue: DefaultShareType, - }, - cli.StringFlag{ - Name: "email", - Patterns: []string{"--email"}, - Description: "The email address of the user or group to share the file with. Requires 'user' or 'group' as type", - }, - cli.BoolFlag{ - Name: "discoverable", - Patterns: []string{"--discoverable"}, - Description: "Make file discoverable by search engines", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "revoke", - Patterns: []string{"--revoke"}, - Description: "Delete all sharing permissions (owner roles will be skipped)", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] share list ", - Description: "List files permissions", - Callback: shareListHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - }, - }, - &cli.Handler{ - Pattern: "[global] share revoke ", - Description: "Revoke permission", - Callback: shareRevokeHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - }, - }, - &cli.Handler{ - Pattern: "[global] delete [options] ", - Description: "Delete file or directory", - Callback: deleteHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "recursive", - Patterns: []string{"-r", "--recursive"}, - Description: "Delete directory and all it's content", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] sync list [options]", - Description: "List all syncable directories on drive", - Callback: listSyncHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "skipHeader", - Patterns: []string{"--no-header"}, - Description: "Dont print the header", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] sync list content [options] ", - Description: "List content of syncable directory", - Callback: listRecursiveSyncHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.StringFlag{ - Name: "sortOrder", - Patterns: []string{"--order"}, - Description: "Sort order. 
See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy", - }, - cli.IntFlag{ - Name: "pathWidth", - Patterns: []string{"--path-width"}, - Description: fmt.Sprintf("Width of path column, default: %d, minimum: 9, use 0 for full width", DefaultPathWidth), - DefaultValue: DefaultPathWidth, - }, - cli.BoolFlag{ - Name: "skipHeader", - Patterns: []string{"--no-header"}, - Description: "Dont print the header", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "sizeInBytes", - Patterns: []string{"--bytes"}, - Description: "Size in bytes", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] sync download [options] ", - Description: "Sync drive directory to local directory", - Callback: downloadSyncHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "keepRemote", - Patterns: []string{"--keep-remote"}, - Description: "Keep remote file when a conflict is encountered", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "keepLocal", - Patterns: []string{"--keep-local"}, - Description: "Keep local file when a conflict is encountered", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "keepLargest", - Patterns: []string{"--keep-largest"}, - Description: "Keep largest file when a conflict is encountered", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "deleteExtraneous", - Patterns: []string{"--delete-extraneous"}, - Description: "Delete extraneous local files", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "dryRun", - Patterns: []string{"--dry-run"}, - Description: "Show what would have been transferred", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] sync upload [options] ", - Description: "Sync local directory to drive", - Callback: uploadSyncHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "keepRemote", - Patterns: []string{"--keep-remote"}, - Description: "Keep remote file when a conflict is encountered", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "keepLocal", - Patterns: []string{"--keep-local"}, - Description: "Keep local file when a conflict is encountered", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "keepLargest", - Patterns: []string{"--keep-largest"}, - Description: "Keep largest file when a conflict is encountered", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "deleteExtraneous", - Patterns: []string{"--delete-extraneous"}, - Description: "Delete extraneous remote files", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "dryRun", - Patterns: []string{"--dry-run"}, - Description: "Show what would have been transferred", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - cli.IntFlag{ - Name: "chunksize", - Patterns: []string{"--chunksize"}, - Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), - DefaultValue: DefaultUploadChunkSize, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] changes [options]", - Description: "List file changes", - Callback: listChangesHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.IntFlag{ - Name: "maxChanges", - Patterns: []string{"-m", "--max"}, - Description: 
fmt.Sprintf("Max changes to list, default: %d", DefaultMaxChanges), - DefaultValue: DefaultMaxChanges, - }, - cli.StringFlag{ - Name: "pageToken", - Patterns: []string{"--since"}, - Description: fmt.Sprintf("Page token to start listing changes from"), - DefaultValue: "1", - }, - cli.BoolFlag{ - Name: "now", - Patterns: []string{"--now"}, - Description: fmt.Sprintf("Get latest page token"), - OmitValue: true, - }, - cli.IntFlag{ - Name: "nameWidth", - Patterns: []string{"--name-width"}, - Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), - DefaultValue: DefaultNameWidth, - }, - cli.BoolFlag{ - Name: "skipHeader", - Patterns: []string{"--no-header"}, - Description: "Dont print the header", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] revision list [options] ", - Description: "List file revisions", - Callback: listRevisionsHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.IntFlag{ - Name: "nameWidth", - Patterns: []string{"--name-width"}, - Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), - DefaultValue: DefaultNameWidth, - }, - cli.BoolFlag{ - Name: "skipHeader", - Patterns: []string{"--no-header"}, - Description: "Dont print the header", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "sizeInBytes", - Patterns: []string{"--bytes"}, - Description: "Size in bytes", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] revision download [options] ", - Description: "Download revision", - Callback: downloadRevisionHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "force", - Patterns: []string{"-f", "--force"}, - Description: "Overwrite existing file", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - cli.BoolFlag{ - Name: "stdout", - Patterns: []string{"--stdout"}, - Description: "Write file content to stdout", - OmitValue: true, - }, - cli.StringFlag{ - Name: "path", - Patterns: []string{"--path"}, - Description: "Download path", - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] revision delete ", - Description: "Delete file revision", - Callback: deleteRevisionHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - }, - }, - &cli.Handler{ - Pattern: "[global] import [options] ", - Description: "Upload and convert file to a google document, see 'about import' for available conversions", - Callback: importHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.StringSliceFlag{ - Name: "parent", - Patterns: []string{"-p", "--parent"}, - Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", - }, - cli.BoolFlag{ - Name: "noProgress", - Patterns: []string{"--no-progress"}, - Description: "Hide progress", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] export [options] ", - Description: "Export a google document", - Callback: exportHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "force", - Patterns: []string{"-f", "--force"}, - Description: "Overwrite existing file", - 
OmitValue: true, - }, - cli.StringFlag{ - Name: "mime", - Patterns: []string{"--mime"}, - Description: "Mime type of exported file", - }, - cli.BoolFlag{ - Name: "printMimes", - Patterns: []string{"--print-mimes"}, - Description: "Print available mime types for given file", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] about [options]", - Description: "Google drive metadata, quota usage", - Callback: aboutHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - cli.NewFlagGroup("options", - cli.BoolFlag{ - Name: "sizeInBytes", - Patterns: []string{"--bytes"}, - Description: "Show size in bytes", - OmitValue: true, - }, - ), - }, - }, - &cli.Handler{ - Pattern: "[global] about import", - Description: "Show supported import formats", - Callback: aboutImportHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - }, - }, - &cli.Handler{ - Pattern: "[global] about export", - Description: "Show supported export formats", - Callback: aboutExportHandler, - FlagGroups: cli.FlagGroups{ - cli.NewFlagGroup("global", globalFlags...), - }, - }, - &cli.Handler{ - Pattern: "version", - Description: "Print application version", - Callback: printVersion, - }, - &cli.Handler{ - Pattern: "help", - Description: "Print help", - Callback: printHelp, - }, - &cli.Handler{ - Pattern: "help ", - Description: "Print command help", - Callback: printCommandHelp, - }, - &cli.Handler{ - Pattern: "help ", - Description: "Print subcommand help", - Callback: printSubCommandHelp, - }, - } + handlers := []*cli.Handler{ + &cli.Handler{ + Pattern: "[global] list [options]", + Description: "List files", + Callback: listHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.IntFlag{ + Name: "maxFiles", + Patterns: []string{"-m", "--max"}, + Description: fmt.Sprintf("Max files to list, default: %d", DefaultMaxFiles), + DefaultValue: DefaultMaxFiles, + }, + cli.StringFlag{ + Name: "query", + Patterns: []string{"-q", "--query"}, + Description: fmt.Sprintf(`Default query: "%s". See https://developers.google.com/drive/search-parameters`, DefaultQuery), + DefaultValue: DefaultQuery, + }, + cli.StringFlag{ + Name: "sortOrder", + Patterns: []string{"--order"}, + Description: "Sort order. 
See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy", + }, + cli.IntFlag{ + Name: "nameWidth", + Patterns: []string{"--name-width"}, + Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), + DefaultValue: DefaultNameWidth, + }, + cli.BoolFlag{ + Name: "absPath", + Patterns: []string{"--absolute"}, + Description: "Show absolute path to file (will only show path from first parent)", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--no-header"}, + Description: "Dont print the header", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Size in bytes", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] download [options] ", + Description: "Download file or directory", + Callback: downloadHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "force", + Patterns: []string{"-f", "--force"}, + Description: "Overwrite existing file", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "recursive", + Patterns: []string{"-r", "--recursive"}, + Description: "Download directory recursively, documents will be skipped", + OmitValue: true, + }, + cli.StringFlag{ + Name: "path", + Patterns: []string{"--path"}, + Description: "Download path", + }, + cli.BoolFlag{ + Name: "delete", + Patterns: []string{"--delete"}, + Description: "Delete remote file when download is successful", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "stdout", + Patterns: []string{"--stdout"}, + Description: "Write file content to stdout", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] download query [options] ", + Description: "Download all files and directories matching query", + Callback: downloadQueryHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "force", + Patterns: []string{"-f", "--force"}, + Description: "Overwrite existing file", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "recursive", + Patterns: []string{"-r", "--recursive"}, + Description: "Download directories recursively, documents will be skipped", + OmitValue: true, + }, + cli.StringFlag{ + Name: "path", + Patterns: []string{"--path"}, + Description: "Download path", + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] upload [options] ", + Description: "Upload file or directory", + Callback: uploadHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "recursive", + Patterns: []string{"-r", "--recursive"}, + Description: "Upload directory recursively", + OmitValue: true, + }, + cli.StringSliceFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", + }, + cli.StringFlag{ + Name: "name", + Patterns: []string{"--name"}, + Description: "Filename", + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + 
cli.StringFlag{ + Name: "mime", + Patterns: []string{"--mime"}, + Description: "Force mime type", + }, + cli.BoolFlag{ + Name: "share", + Patterns: []string{"--share"}, + Description: "Share file", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "delete", + Patterns: []string{"--delete"}, + Description: "Delete local file when upload is successful", + OmitValue: true, + }, + cli.IntFlag{ + Name: "chunksize", + Patterns: []string{"--chunksize"}, + Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), + DefaultValue: DefaultUploadChunkSize, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] upload - [options] ", + Description: "Upload file from stdin", + Callback: uploadStdinHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.StringSliceFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", + }, + cli.IntFlag{ + Name: "chunksize", + Patterns: []string{"--chunksize"}, + Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), + DefaultValue: DefaultUploadChunkSize, + }, + cli.StringFlag{ + Name: "mime", + Patterns: []string{"--mime"}, + Description: "Force mime type", + }, + cli.BoolFlag{ + Name: "share", + Patterns: []string{"--share"}, + Description: "Share file", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] update [options] ", + Description: "Update file, this creates a new revision of the file", + Callback: updateHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.StringSliceFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", + }, + cli.StringFlag{ + Name: "name", + Patterns: []string{"--name"}, + Description: "Filename", + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.StringFlag{ + Name: "mime", + Patterns: []string{"--mime"}, + Description: "Force mime type", + }, + cli.IntFlag{ + Name: "chunksize", + Patterns: []string{"--chunksize"}, + Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), + DefaultValue: DefaultUploadChunkSize, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] info [options] ", + Description: "Show file info", + Callback: infoHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Show size in bytes", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] mkdir [options] ", + Description: "Create directory", + Callback: mkdirHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.StringSliceFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id of created directory, can be specified multiple times to give many parents", + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] share [options] ", + Description: "Share file or 
directory", + Callback: shareHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.StringFlag{ + Name: "role", + Patterns: []string{"--role"}, + Description: fmt.Sprintf("Share role: owner/writer/commenter/reader, default: %s", DefaultShareRole), + DefaultValue: DefaultShareRole, + }, + cli.StringFlag{ + Name: "type", + Patterns: []string{"--type"}, + Description: fmt.Sprintf("Share type: user/group/domain/anyone, default: %s", DefaultShareType), + DefaultValue: DefaultShareType, + }, + cli.StringFlag{ + Name: "email", + Patterns: []string{"--email"}, + Description: "The email address of the user or group to share the file with. Requires 'user' or 'group' as type", + }, + cli.BoolFlag{ + Name: "discoverable", + Patterns: []string{"--discoverable"}, + Description: "Make file discoverable by search engines", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "revoke", + Patterns: []string{"--revoke"}, + Description: "Delete all sharing permissions (owner roles will be skipped)", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] share list ", + Description: "List files permissions", + Callback: shareListHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + }, + }, + &cli.Handler{ + Pattern: "[global] share revoke ", + Description: "Revoke permission", + Callback: shareRevokeHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + }, + }, + &cli.Handler{ + Pattern: "[global] delete [options] ", + Description: "Delete file or directory", + Callback: deleteHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "recursive", + Patterns: []string{"-r", "--recursive"}, + Description: "Delete directory and all it's content", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] sync list [options]", + Description: "List all syncable directories on drive", + Callback: listSyncHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--no-header"}, + Description: "Dont print the header", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] sync list content [options] ", + Description: "List content of syncable directory", + Callback: listRecursiveSyncHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.StringFlag{ + Name: "sortOrder", + Patterns: []string{"--order"}, + Description: "Sort order. 
See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy", + }, + cli.IntFlag{ + Name: "pathWidth", + Patterns: []string{"--path-width"}, + Description: fmt.Sprintf("Width of path column, default: %d, minimum: 9, use 0 for full width", DefaultPathWidth), + DefaultValue: DefaultPathWidth, + }, + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--no-header"}, + Description: "Dont print the header", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Size in bytes", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] sync download [options] ", + Description: "Sync drive directory to local directory", + Callback: downloadSyncHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "keepRemote", + Patterns: []string{"--keep-remote"}, + Description: "Keep remote file when a conflict is encountered", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "keepLocal", + Patterns: []string{"--keep-local"}, + Description: "Keep local file when a conflict is encountered", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "keepLargest", + Patterns: []string{"--keep-largest"}, + Description: "Keep largest file when a conflict is encountered", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "deleteExtraneous", + Patterns: []string{"--delete-extraneous"}, + Description: "Delete extraneous local files", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "dryRun", + Patterns: []string{"--dry-run"}, + Description: "Show what would have been transferred", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] sync upload [options] ", + Description: "Sync local directory to drive", + Callback: uploadSyncHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "keepRemote", + Patterns: []string{"--keep-remote"}, + Description: "Keep remote file when a conflict is encountered", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "keepLocal", + Patterns: []string{"--keep-local"}, + Description: "Keep local file when a conflict is encountered", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "keepLargest", + Patterns: []string{"--keep-largest"}, + Description: "Keep largest file when a conflict is encountered", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "deleteExtraneous", + Patterns: []string{"--delete-extraneous"}, + Description: "Delete extraneous remote files", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "dryRun", + Patterns: []string{"--dry-run"}, + Description: "Show what would have been transferred", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.IntFlag{ + Name: "chunksize", + Patterns: []string{"--chunksize"}, + Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), + DefaultValue: DefaultUploadChunkSize, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] changes [options]", + Description: "List file changes", + Callback: listChangesHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.IntFlag{ + Name: "maxChanges", + Patterns: []string{"-m", "--max"}, + Description: 
fmt.Sprintf("Max changes to list, default: %d", DefaultMaxChanges), + DefaultValue: DefaultMaxChanges, + }, + cli.StringFlag{ + Name: "pageToken", + Patterns: []string{"--since"}, + Description: fmt.Sprintf("Page token to start listing changes from"), + DefaultValue: "1", + }, + cli.BoolFlag{ + Name: "now", + Patterns: []string{"--now"}, + Description: fmt.Sprintf("Get latest page token"), + OmitValue: true, + }, + cli.IntFlag{ + Name: "nameWidth", + Patterns: []string{"--name-width"}, + Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), + DefaultValue: DefaultNameWidth, + }, + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--no-header"}, + Description: "Dont print the header", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] revision list [options] ", + Description: "List file revisions", + Callback: listRevisionsHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.IntFlag{ + Name: "nameWidth", + Patterns: []string{"--name-width"}, + Description: fmt.Sprintf("Width of name column, default: %d, minimum: 9, use 0 for full width", DefaultNameWidth), + DefaultValue: DefaultNameWidth, + }, + cli.BoolFlag{ + Name: "skipHeader", + Patterns: []string{"--no-header"}, + Description: "Dont print the header", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Size in bytes", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] revision download [options] ", + Description: "Download revision", + Callback: downloadRevisionHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "force", + Patterns: []string{"-f", "--force"}, + Description: "Overwrite existing file", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + cli.BoolFlag{ + Name: "stdout", + Patterns: []string{"--stdout"}, + Description: "Write file content to stdout", + OmitValue: true, + }, + cli.StringFlag{ + Name: "path", + Patterns: []string{"--path"}, + Description: "Download path", + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] revision delete ", + Description: "Delete file revision", + Callback: deleteRevisionHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + }, + }, + &cli.Handler{ + Pattern: "[global] import [options] ", + Description: "Upload and convert file to a google document, see 'about import' for available conversions", + Callback: importHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.StringSliceFlag{ + Name: "parent", + Patterns: []string{"-p", "--parent"}, + Description: "Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents", + }, + cli.BoolFlag{ + Name: "noProgress", + Patterns: []string{"--no-progress"}, + Description: "Hide progress", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] export [options] ", + Description: "Export a google document", + Callback: exportHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "force", + Patterns: []string{"-f", "--force"}, + Description: "Overwrite existing file", + 
OmitValue: true, + }, + cli.StringFlag{ + Name: "mime", + Patterns: []string{"--mime"}, + Description: "Mime type of exported file", + }, + cli.BoolFlag{ + Name: "printMimes", + Patterns: []string{"--print-mimes"}, + Description: "Print available mime types for given file", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] about [options]", + Description: "Google drive metadata, quota usage", + Callback: aboutHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + cli.NewFlagGroup("options", + cli.BoolFlag{ + Name: "sizeInBytes", + Patterns: []string{"--bytes"}, + Description: "Show size in bytes", + OmitValue: true, + }, + ), + }, + }, + &cli.Handler{ + Pattern: "[global] about import", + Description: "Show supported import formats", + Callback: aboutImportHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + }, + }, + &cli.Handler{ + Pattern: "[global] about export", + Description: "Show supported export formats", + Callback: aboutExportHandler, + FlagGroups: cli.FlagGroups{ + cli.NewFlagGroup("global", globalFlags...), + }, + }, + &cli.Handler{ + Pattern: "version", + Description: "Print application version", + Callback: printVersion, + }, + &cli.Handler{ + Pattern: "help", + Description: "Print help", + Callback: printHelp, + }, + &cli.Handler{ + Pattern: "help ", + Description: "Print command help", + Callback: printCommandHelp, + }, + &cli.Handler{ + Pattern: "help ", + Description: "Print subcommand help", + Callback: printSubCommandHelp, + }, + } - cli.SetHandlers(handlers) + cli.SetHandlers(handlers) - if ok := cli.Handle(os.Args[1:]); !ok { - ExitF("No valid arguments given, use '%s help' to see available commands", Name) - } + if ok := cli.Handle(os.Args[1:]); !ok { + ExitF("No valid arguments given, use '%s help' to see available commands", Name) + } } diff --git a/handlers_drive.go b/handlers_drive.go index 05b1ca9c..7812c9b2 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -1,427 +1,426 @@ package main import ( + "./auth" + "./cli" + "./drive" "fmt" - "os" "io" "io/ioutil" - "path/filepath" "net/http" - "./cli" - "./auth" - "./drive" + "os" + "path/filepath" ) -const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com" +const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com" const ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO" const TokenFilename = "token_v2.json" const DefaultCacheFileName = "file_cache.json" - func listHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).List(drive.ListFilesArgs{ - Out: os.Stdout, - MaxFiles: args.Int64("maxFiles"), - NameWidth: args.Int64("nameWidth"), - Query: args.String("query"), - SortOrder: args.String("sortOrder"), - SkipHeader: args.Bool("skipHeader"), - SizeInBytes: args.Bool("sizeInBytes"), - AbsPath: args.Bool("absPath"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).List(drive.ListFilesArgs{ + Out: os.Stdout, + MaxFiles: args.Int64("maxFiles"), + NameWidth: args.Int64("nameWidth"), + Query: args.String("query"), + SortOrder: args.String("sortOrder"), + SkipHeader: args.Bool("skipHeader"), + SizeInBytes: args.Bool("sizeInBytes"), + AbsPath: args.Bool("absPath"), + }) + checkErr(err) } func listChangesHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).ListChanges(drive.ListChangesArgs{ - Out: os.Stdout, - PageToken: args.String("pageToken"), - MaxChanges: args.Int64("maxChanges"), - Now: args.Bool("now"), - 
NameWidth: args.Int64("nameWidth"), - SkipHeader: args.Bool("skipHeader"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).ListChanges(drive.ListChangesArgs{ + Out: os.Stdout, + PageToken: args.String("pageToken"), + MaxChanges: args.Int64("maxChanges"), + Now: args.Bool("now"), + NameWidth: args.Int64("nameWidth"), + SkipHeader: args.Bool("skipHeader"), + }) + checkErr(err) } func downloadHandler(ctx cli.Context) { - args := ctx.Args() - checkDownloadArgs(args) - err := newDrive(args).Download(drive.DownloadArgs{ - Out: os.Stdout, - Id: args.String("fileId"), - Force: args.Bool("force"), - Path: args.String("path"), - Delete: args.Bool("delete"), - Recursive: args.Bool("recursive"), - Stdout: args.Bool("stdout"), - Progress: progressWriter(args.Bool("noProgress")), - }) - checkErr(err) + args := ctx.Args() + checkDownloadArgs(args) + err := newDrive(args).Download(drive.DownloadArgs{ + Out: os.Stdout, + Id: args.String("fileId"), + Force: args.Bool("force"), + Path: args.String("path"), + Delete: args.Bool("delete"), + Recursive: args.Bool("recursive"), + Stdout: args.Bool("stdout"), + Progress: progressWriter(args.Bool("noProgress")), + }) + checkErr(err) } func downloadQueryHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).DownloadQuery(drive.DownloadQueryArgs{ - Out: os.Stdout, - Query: args.String("query"), - Force: args.Bool("force"), - Recursive: args.Bool("recursive"), - Path: args.String("path"), - Progress: progressWriter(args.Bool("noProgress")), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).DownloadQuery(drive.DownloadQueryArgs{ + Out: os.Stdout, + Query: args.String("query"), + Force: args.Bool("force"), + Recursive: args.Bool("recursive"), + Path: args.String("path"), + Progress: progressWriter(args.Bool("noProgress")), + }) + checkErr(err) } func downloadSyncHandler(ctx cli.Context) { - args := ctx.Args() - cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName) - err := newDrive(args).DownloadSync(drive.DownloadSyncArgs{ - Out: os.Stdout, - Progress: progressWriter(args.Bool("noProgress")), - Path: args.String("path"), - RootId: args.String("fileId"), - DryRun: args.Bool("dryRun"), - DeleteExtraneous: args.Bool("deleteExtraneous"), - Resolution: conflictResolution(args), - Comparer: NewCachedMd5Comparer(cachePath), - }) - checkErr(err) + args := ctx.Args() + cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName) + err := newDrive(args).DownloadSync(drive.DownloadSyncArgs{ + Out: os.Stdout, + Progress: progressWriter(args.Bool("noProgress")), + Path: args.String("path"), + RootId: args.String("fileId"), + DryRun: args.Bool("dryRun"), + DeleteExtraneous: args.Bool("deleteExtraneous"), + Resolution: conflictResolution(args), + Comparer: NewCachedMd5Comparer(cachePath), + }) + checkErr(err) } func downloadRevisionHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).DownloadRevision(drive.DownloadRevisionArgs{ - Out: os.Stdout, - FileId: args.String("fileId"), - RevisionId: args.String("revisionId"), - Force: args.Bool("force"), - Stdout: args.Bool("stdout"), - Path: args.String("path"), - Progress: progressWriter(args.Bool("noProgress")), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).DownloadRevision(drive.DownloadRevisionArgs{ + Out: os.Stdout, + FileId: args.String("fileId"), + RevisionId: args.String("revisionId"), + Force: args.Bool("force"), + Stdout: args.Bool("stdout"), + Path: args.String("path"), + Progress: 
progressWriter(args.Bool("noProgress")), + }) + checkErr(err) } func uploadHandler(ctx cli.Context) { - args := ctx.Args() - checkUploadArgs(args) - err := newDrive(args).Upload(drive.UploadArgs{ - Out: os.Stdout, - Progress: progressWriter(args.Bool("noProgress")), - Path: args.String("path"), - Name: args.String("name"), - Parents: args.StringSlice("parent"), - Mime: args.String("mime"), - Recursive: args.Bool("recursive"), - Share: args.Bool("share"), - Delete: args.Bool("delete"), - ChunkSize: args.Int64("chunksize"), - }) - checkErr(err) + args := ctx.Args() + checkUploadArgs(args) + err := newDrive(args).Upload(drive.UploadArgs{ + Out: os.Stdout, + Progress: progressWriter(args.Bool("noProgress")), + Path: args.String("path"), + Name: args.String("name"), + Parents: args.StringSlice("parent"), + Mime: args.String("mime"), + Recursive: args.Bool("recursive"), + Share: args.Bool("share"), + Delete: args.Bool("delete"), + ChunkSize: args.Int64("chunksize"), + }) + checkErr(err) } func uploadStdinHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).UploadStream(drive.UploadStreamArgs{ - Out: os.Stdout, - In: os.Stdin, - Name: args.String("name"), - Parents: args.StringSlice("parent"), - Mime: args.String("mime"), - Share: args.Bool("share"), - ChunkSize: args.Int64("chunksize"), - Progress: progressWriter(args.Bool("noProgress")), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).UploadStream(drive.UploadStreamArgs{ + Out: os.Stdout, + In: os.Stdin, + Name: args.String("name"), + Parents: args.StringSlice("parent"), + Mime: args.String("mime"), + Share: args.Bool("share"), + ChunkSize: args.Int64("chunksize"), + Progress: progressWriter(args.Bool("noProgress")), + }) + checkErr(err) } func uploadSyncHandler(ctx cli.Context) { - args := ctx.Args() - cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName) - err := newDrive(args).UploadSync(drive.UploadSyncArgs{ - Out: os.Stdout, - Progress: progressWriter(args.Bool("noProgress")), - Path: args.String("path"), - RootId: args.String("fileId"), - DryRun: args.Bool("dryRun"), - DeleteExtraneous: args.Bool("deleteExtraneous"), - ChunkSize: args.Int64("chunksize"), - Resolution: conflictResolution(args), - Comparer: NewCachedMd5Comparer(cachePath), - }) - checkErr(err) + args := ctx.Args() + cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName) + err := newDrive(args).UploadSync(drive.UploadSyncArgs{ + Out: os.Stdout, + Progress: progressWriter(args.Bool("noProgress")), + Path: args.String("path"), + RootId: args.String("fileId"), + DryRun: args.Bool("dryRun"), + DeleteExtraneous: args.Bool("deleteExtraneous"), + ChunkSize: args.Int64("chunksize"), + Resolution: conflictResolution(args), + Comparer: NewCachedMd5Comparer(cachePath), + }) + checkErr(err) } func updateHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).Update(drive.UpdateArgs{ - Out: os.Stdout, - Id: args.String("fileId"), - Path: args.String("path"), - Name: args.String("name"), - Parents: args.StringSlice("parent"), - Mime: args.String("mime"), - Progress: progressWriter(args.Bool("noProgress")), - ChunkSize: args.Int64("chunksize"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).Update(drive.UpdateArgs{ + Out: os.Stdout, + Id: args.String("fileId"), + Path: args.String("path"), + Name: args.String("name"), + Parents: args.StringSlice("parent"), + Mime: args.String("mime"), + Progress: progressWriter(args.Bool("noProgress")), + ChunkSize: args.Int64("chunksize"), + }) 
+ checkErr(err) } func infoHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).Info(drive.FileInfoArgs{ - Out: os.Stdout, - Id: args.String("fileId"), - SizeInBytes: args.Bool("sizeInBytes"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).Info(drive.FileInfoArgs{ + Out: os.Stdout, + Id: args.String("fileId"), + SizeInBytes: args.Bool("sizeInBytes"), + }) + checkErr(err) } func importHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).Import(drive.ImportArgs{ - Out: os.Stdout, - Path: args.String("path"), - Parents: args.StringSlice("parent"), - Progress: progressWriter(args.Bool("noProgress")), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).Import(drive.ImportArgs{ + Out: os.Stdout, + Path: args.String("path"), + Parents: args.StringSlice("parent"), + Progress: progressWriter(args.Bool("noProgress")), + }) + checkErr(err) } func exportHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).Export(drive.ExportArgs{ - Out: os.Stdout, - Id: args.String("fileId"), - Mime: args.String("mime"), - PrintMimes: args.Bool("printMimes"), - Force: args.Bool("force"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).Export(drive.ExportArgs{ + Out: os.Stdout, + Id: args.String("fileId"), + Mime: args.String("mime"), + PrintMimes: args.Bool("printMimes"), + Force: args.Bool("force"), + }) + checkErr(err) } func listRevisionsHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).ListRevisions(drive.ListRevisionsArgs{ - Out: os.Stdout, - Id: args.String("fileId"), - NameWidth: args.Int64("nameWidth"), - SizeInBytes: args.Bool("sizeInBytes"), - SkipHeader: args.Bool("skipHeader"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).ListRevisions(drive.ListRevisionsArgs{ + Out: os.Stdout, + Id: args.String("fileId"), + NameWidth: args.Int64("nameWidth"), + SizeInBytes: args.Bool("sizeInBytes"), + SkipHeader: args.Bool("skipHeader"), + }) + checkErr(err) } func mkdirHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).Mkdir(drive.MkdirArgs{ - Out: os.Stdout, - Name: args.String("name"), - Parents: args.StringSlice("parent"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).Mkdir(drive.MkdirArgs{ + Out: os.Stdout, + Name: args.String("name"), + Parents: args.StringSlice("parent"), + }) + checkErr(err) } func shareHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).Share(drive.ShareArgs{ - Out: os.Stdout, - FileId: args.String("fileId"), - Role: args.String("role"), - Type: args.String("type"), - Email: args.String("email"), - Discoverable: args.Bool("discoverable"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).Share(drive.ShareArgs{ + Out: os.Stdout, + FileId: args.String("fileId"), + Role: args.String("role"), + Type: args.String("type"), + Email: args.String("email"), + Discoverable: args.Bool("discoverable"), + }) + checkErr(err) } func shareListHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).ListPermissions(drive.ListPermissionsArgs{ - Out: os.Stdout, - FileId: args.String("fileId"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).ListPermissions(drive.ListPermissionsArgs{ + Out: os.Stdout, + FileId: args.String("fileId"), + }) + checkErr(err) } func shareRevokeHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).RevokePermission(drive.RevokePermissionArgs{ - Out: os.Stdout, - FileId: args.String("fileId"), - PermissionId: 
args.String("permissionId"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).RevokePermission(drive.RevokePermissionArgs{ + Out: os.Stdout, + FileId: args.String("fileId"), + PermissionId: args.String("permissionId"), + }) + checkErr(err) } func deleteHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).Delete(drive.DeleteArgs{ - Out: os.Stdout, - Id: args.String("fileId"), - Recursive: args.Bool("recursive"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).Delete(drive.DeleteArgs{ + Out: os.Stdout, + Id: args.String("fileId"), + Recursive: args.Bool("recursive"), + }) + checkErr(err) } func listSyncHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).ListSync(drive.ListSyncArgs{ - Out: os.Stdout, - SkipHeader: args.Bool("skipHeader"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).ListSync(drive.ListSyncArgs{ + Out: os.Stdout, + SkipHeader: args.Bool("skipHeader"), + }) + checkErr(err) } func listRecursiveSyncHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).ListRecursiveSync(drive.ListRecursiveSyncArgs{ - Out: os.Stdout, - RootId: args.String("fileId"), - SkipHeader: args.Bool("skipHeader"), - PathWidth: args.Int64("pathWidth"), - SizeInBytes: args.Bool("sizeInBytes"), - SortOrder: args.String("sortOrder"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).ListRecursiveSync(drive.ListRecursiveSyncArgs{ + Out: os.Stdout, + RootId: args.String("fileId"), + SkipHeader: args.Bool("skipHeader"), + PathWidth: args.Int64("pathWidth"), + SizeInBytes: args.Bool("sizeInBytes"), + SortOrder: args.String("sortOrder"), + }) + checkErr(err) } func deleteRevisionHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{ - Out: os.Stdout, - FileId: args.String("fileId"), - RevisionId: args.String("revisionId"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{ + Out: os.Stdout, + FileId: args.String("fileId"), + RevisionId: args.String("revisionId"), + }) + checkErr(err) } func aboutHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).About(drive.AboutArgs{ - Out: os.Stdout, - SizeInBytes: args.Bool("sizeInBytes"), - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).About(drive.AboutArgs{ + Out: os.Stdout, + SizeInBytes: args.Bool("sizeInBytes"), + }) + checkErr(err) } func aboutImportHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).AboutImport(drive.AboutImportArgs{ - Out: os.Stdout, - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).AboutImport(drive.AboutImportArgs{ + Out: os.Stdout, + }) + checkErr(err) } func aboutExportHandler(ctx cli.Context) { - args := ctx.Args() - err := newDrive(args).AboutExport(drive.AboutExportArgs{ - Out: os.Stdout, - }) - checkErr(err) + args := ctx.Args() + err := newDrive(args).AboutExport(drive.AboutExportArgs{ + Out: os.Stdout, + }) + checkErr(err) } func getOauthClient(args cli.Arguments) (*http.Client, error) { - if args.String("refreshToken") != "" && args.String("accessToken") != "" { - ExitF("Access token not needed when refresh token is provided") - } + if args.String("refreshToken") != "" && args.String("accessToken") != "" { + ExitF("Access token not needed when refresh token is provided") + } - if args.String("refreshToken") != "" { - return auth.NewRefreshTokenClient(ClientId, ClientSecret, args.String("refreshToken")), nil - } + if 
args.String("refreshToken") != "" { + return auth.NewRefreshTokenClient(ClientId, ClientSecret, args.String("refreshToken")), nil + } - if args.String("accessToken") != "" { - return auth.NewAccessTokenClient(ClientId, ClientSecret, args.String("accessToken")), nil - } + if args.String("accessToken") != "" { + return auth.NewAccessTokenClient(ClientId, ClientSecret, args.String("accessToken")), nil + } - configDir := getConfigDir(args) - tokenPath := ConfigFilePath(configDir, TokenFilename) - return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt) + configDir := getConfigDir(args) + tokenPath := ConfigFilePath(configDir, TokenFilename) + return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt) } func getConfigDir(args cli.Arguments) string { - // Use dir from environment var if present - if os.Getenv("GDRIVE_CONFIG_DIR") != "" { - return os.Getenv("GDRIVE_CONFIG_DIR") - } - return args.String("configDir") + // Use dir from environment var if present + if os.Getenv("GDRIVE_CONFIG_DIR") != "" { + return os.Getenv("GDRIVE_CONFIG_DIR") + } + return args.String("configDir") } func newDrive(args cli.Arguments) *drive.Drive { - oauth, err := getOauthClient(args) - if err != nil { - ExitF("Failed getting oauth client: %s", err.Error()) - } + oauth, err := getOauthClient(args) + if err != nil { + ExitF("Failed getting oauth client: %s", err.Error()) + } - client, err := drive.New(oauth) - if err != nil { - ExitF("Failed getting drive: %s", err.Error()) - } + client, err := drive.New(oauth) + if err != nil { + ExitF("Failed getting drive: %s", err.Error()) + } - return client + return client } func authCodePrompt(url string) func() string { - return func() string { - fmt.Println("Authentication needed") - fmt.Println("Go to the following url in your browser:") - fmt.Printf("%s\n\n", url) - fmt.Print("Enter verification code: ") + return func() string { + fmt.Println("Authentication needed") + fmt.Println("Go to the following url in your browser:") + fmt.Printf("%s\n\n", url) + fmt.Print("Enter verification code: ") - var code string - if _, err := fmt.Scan(&code); err != nil { - fmt.Printf("Failed reading code: %s", err.Error()) - } - return code - } + var code string + if _, err := fmt.Scan(&code); err != nil { + fmt.Printf("Failed reading code: %s", err.Error()) + } + return code + } } func progressWriter(discard bool) io.Writer { - if discard { - return ioutil.Discard - } - return os.Stderr + if discard { + return ioutil.Discard + } + return os.Stderr } func conflictResolution(args cli.Arguments) drive.ConflictResolution { - keepLocal := args.Bool("keepLocal") - keepRemote := args.Bool("keepRemote") - keepLargest := args.Bool("keepLargest") + keepLocal := args.Bool("keepLocal") + keepRemote := args.Bool("keepRemote") + keepLargest := args.Bool("keepLargest") - if (keepLocal && keepRemote) || (keepLocal && keepLargest) || (keepRemote && keepLargest) { - ExitF("Only one conflict resolution flag can be given") - } + if (keepLocal && keepRemote) || (keepLocal && keepLargest) || (keepRemote && keepLargest) { + ExitF("Only one conflict resolution flag can be given") + } - if keepLocal { - return drive.KeepLocal - } + if keepLocal { + return drive.KeepLocal + } - if keepRemote { - return drive.KeepRemote - } + if keepRemote { + return drive.KeepRemote + } - if keepLargest { - return drive.KeepLargest - } + if keepLargest { + return drive.KeepLargest + } - return drive.NoResolution + return drive.NoResolution } func checkUploadArgs(args cli.Arguments) 
{ - if args.Bool("recursive") && args.Bool("delete") { - ExitF("--delete is not allowed for recursive uploads") - } + if args.Bool("recursive") && args.Bool("delete") { + ExitF("--delete is not allowed for recursive uploads") + } - if args.Bool("recursive") && args.Bool("share") { - ExitF("--share is not allowed for recursive uploads") - } + if args.Bool("recursive") && args.Bool("share") { + ExitF("--share is not allowed for recursive uploads") + } } func checkDownloadArgs(args cli.Arguments) { - if args.Bool("recursive") && args.Bool("delete") { - ExitF("--delete is not allowed for recursive downloads") - } + if args.Bool("recursive") && args.Bool("delete") { + ExitF("--delete is not allowed for recursive downloads") + } } diff --git a/handlers_meta.go b/handlers_meta.go index 52be7105..72e3dd0f 100644 --- a/handlers_meta.go +++ b/handlers_meta.go @@ -1,95 +1,95 @@ package main import ( - "os" + "./cli" "fmt" - "strings" + "os" "runtime" - "text/tabwriter" - "./cli" + "strings" + "text/tabwriter" ) func printVersion(ctx cli.Context) { - fmt.Printf("%s: %s\n", Name, Version) - fmt.Printf("Golang: %s\n", runtime.Version()) - fmt.Printf("OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH) + fmt.Printf("%s: %s\n", Name, Version) + fmt.Printf("Golang: %s\n", runtime.Version()) + fmt.Printf("OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH) } func printHelp(ctx cli.Context) { - w := new(tabwriter.Writer) - w.Init(os.Stdout, 0, 0, 3, ' ', 0) + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 0, 3, ' ', 0) - fmt.Fprintf(w, "%s usage:\n\n", Name) + fmt.Fprintf(w, "%s usage:\n\n", Name) - for _, h := range ctx.Handlers() { - fmt.Fprintf(w, "%s %s\t%s\n", Name, h.Pattern, h.Description) - } + for _, h := range ctx.Handlers() { + fmt.Fprintf(w, "%s %s\t%s\n", Name, h.Pattern, h.Description) + } - w.Flush() + w.Flush() } func printCommandHelp(ctx cli.Context) { - args := ctx.Args() - printCommandPrefixHelp(ctx, args.String("command")) + args := ctx.Args() + printCommandPrefixHelp(ctx, args.String("command")) } func printSubCommandHelp(ctx cli.Context) { - args := ctx.Args() - printCommandPrefixHelp(ctx, args.String("command"), args.String("subcommand")) + args := ctx.Args() + printCommandPrefixHelp(ctx, args.String("command"), args.String("subcommand")) } func printCommandPrefixHelp(ctx cli.Context, prefix ...string) { - handler := getHandler(ctx.Handlers(), prefix) - - if handler == nil { - ExitF("Command not found") - } - - w := new(tabwriter.Writer) - w.Init(os.Stdout, 0, 0, 3, ' ', 0) - - fmt.Fprintf(w, "%s\n", handler.Description) - fmt.Fprintf(w, "%s %s\n", Name, handler.Pattern) - for _, group := range handler.FlagGroups { - fmt.Fprintf(w, "\n%s:\n", group.Name) - for _, flag := range group.Flags { - boolFlag, isBool := flag.(cli.BoolFlag) - if isBool && boolFlag.OmitValue { - fmt.Fprintf(w, " %s\t%s\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription()) - } else { - fmt.Fprintf(w, " %s <%s>\t%s\n", strings.Join(flag.GetPatterns(), ", "), flag.GetName(), flag.GetDescription()) - } - } - } - - w.Flush() + handler := getHandler(ctx.Handlers(), prefix) + + if handler == nil { + ExitF("Command not found") + } + + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 0, 3, ' ', 0) + + fmt.Fprintf(w, "%s\n", handler.Description) + fmt.Fprintf(w, "%s %s\n", Name, handler.Pattern) + for _, group := range handler.FlagGroups { + fmt.Fprintf(w, "\n%s:\n", group.Name) + for _, flag := range group.Flags { + boolFlag, isBool := flag.(cli.BoolFlag) + if isBool && boolFlag.OmitValue { + fmt.Fprintf(w, " 
%s\t%s\n", strings.Join(flag.GetPatterns(), ", "), flag.GetDescription()) + } else { + fmt.Fprintf(w, " %s <%s>\t%s\n", strings.Join(flag.GetPatterns(), ", "), flag.GetName(), flag.GetDescription()) + } + } + } + + w.Flush() } func getHandler(handlers []*cli.Handler, prefix []string) *cli.Handler { - for _, h := range handlers { - pattern := stripOptionals(h.SplitPattern()) + for _, h := range handlers { + pattern := stripOptionals(h.SplitPattern()) - if len(prefix) > len(pattern) { - continue - } + if len(prefix) > len(pattern) { + continue + } - if equal(prefix, pattern[:len(prefix)]) { - return h - } - } + if equal(prefix, pattern[:len(prefix)]) { + return h + } + } - return nil + return nil } // Strip optional groups (<...>) from pattern func stripOptionals(pattern []string) []string { - newArgs := []string{} - - for _, arg := range pattern { - if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") { - continue - } - newArgs = append(newArgs, arg) - } - return newArgs + newArgs := []string{} + + for _, arg := range pattern { + if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") { + continue + } + newArgs = append(newArgs, arg) + } + return newArgs } diff --git a/util.go b/util.go index 041daed0..dbdd3949 100644 --- a/util.go +++ b/util.go @@ -1,21 +1,21 @@ package main import ( - "runtime" - "path/filepath" - "fmt" - "encoding/json" - "os" - "io" - "crypto/md5" + "crypto/md5" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "runtime" ) func GetDefaultConfigDir() string { - return filepath.Join(Homedir(), ".gdrive") + return filepath.Join(Homedir(), ".gdrive") } func ConfigFilePath(basePath, name string) string { - return filepath.Join(basePath, name) + return filepath.Join(basePath, name) } func Homedir() string { @@ -26,25 +26,25 @@ func Homedir() string { } func equal(a, b []string) bool { - if a == nil && b == nil { - return true; - } + if a == nil && b == nil { + return true + } - if a == nil || b == nil { - return false; - } + if a == nil || b == nil { + return false + } - if len(a) != len(b) { - return false - } + if len(a) != len(b) { + return false + } - for i := range a { - if a[i] != b[i] { - return false - } - } + for i := range a { + if a[i] != b[i] { + return false + } + } - return true + return true } func ExitF(format string, a ...interface{}) { @@ -54,37 +54,37 @@ func ExitF(format string, a ...interface{}) { } func checkErr(err error) { - if err != nil { - fmt.Println(err) - os.Exit(1) - } + if err != nil { + fmt.Println(err) + os.Exit(1) + } } func writeJson(path string, data interface{}) error { - tmpFile := path + ".tmp" - f, err := os.Create(tmpFile) - if err != nil { - return err - } - - err = json.NewEncoder(f).Encode(data) - f.Close() - if err != nil { - os.Remove(tmpFile) - return err - } - - return os.Rename(tmpFile, path) + tmpFile := path + ".tmp" + f, err := os.Create(tmpFile) + if err != nil { + return err + } + + err = json.NewEncoder(f).Encode(data) + f.Close() + if err != nil { + os.Remove(tmpFile) + return err + } + + return os.Rename(tmpFile, path) } func md5sum(path string) string { - h := md5.New() - f, err := os.Open(path) - if err != nil { - return "" - } - defer f.Close() - - io.Copy(h, f) - return fmt.Sprintf("%x", h.Sum(nil)) + h := md5.New() + f, err := os.Open(path) + if err != nil { + return "" + } + defer f.Close() + + io.Copy(h, f) + return fmt.Sprintf("%x", h.Sum(nil)) } From 30b85159f73e0058d24f2d05ce419abc646644fc Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 21:06:07 +0100 Subject: 
[PATCH 146/195] Remove stale code --- drive/download.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drive/download.go b/drive/download.go index 15495dfb..5fa121d2 100644 --- a/drive/download.go +++ b/drive/download.go @@ -197,10 +197,6 @@ func (self *Drive) saveFile(args saveFileArgs) (int64, int64, error) { // Calculate average download rate rate := calcRate(bytes, started, time.Now()) - //if deleteSourceFile { - // self.Delete(args.Id) - //} - // Close File outFile.Close() From 82dda1b1d2fab5692c483e514b42b931be7d550e Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 21:22:01 +0100 Subject: [PATCH 147/195] s/sync list content/sync content/ --- gdrive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gdrive.go b/gdrive.go index 94e000c4..0711e754 100644 --- a/gdrive.go +++ b/gdrive.go @@ -419,7 +419,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] sync list content [options] ", + Pattern: "[global] sync content [options] ", Description: "List content of syncable directory", Callback: listRecursiveSyncHandler, FlagGroups: cli.FlagGroups{ From a0853c7d13415a05462f78b8577c9a174b99e3ac Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 22:49:51 +0100 Subject: [PATCH 148/195] s/revisionId/revId/ --- gdrive.go | 4 ++-- handlers_drive.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/gdrive.go b/gdrive.go index 0711e754..c510418d 100644 --- a/gdrive.go +++ b/gdrive.go @@ -618,7 +618,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] revision download [options] ", + Pattern: "[global] revision download [options] ", Description: "Download revision", Callback: downloadRevisionHandler, FlagGroups: cli.FlagGroups{ @@ -651,7 +651,7 @@ func main() { }, }, &cli.Handler{ - Pattern: "[global] revision delete ", + Pattern: "[global] revision delete ", Description: "Delete file revision", Callback: deleteRevisionHandler, FlagGroups: cli.FlagGroups{ diff --git a/handlers_drive.go b/handlers_drive.go index 7812c9b2..baaf050f 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -95,7 +95,7 @@ func downloadRevisionHandler(ctx cli.Context) { err := newDrive(args).DownloadRevision(drive.DownloadRevisionArgs{ Out: os.Stdout, FileId: args.String("fileId"), - RevisionId: args.String("revisionId"), + RevisionId: args.String("revId"), Force: args.Bool("force"), Stdout: args.Bool("stdout"), Path: args.String("path"), @@ -293,7 +293,7 @@ func deleteRevisionHandler(ctx cli.Context) { err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{ Out: os.Stdout, FileId: args.String("fileId"), - RevisionId: args.String("revisionId"), + RevisionId: args.String("revId"), }) checkErr(err) } From 4e0e5fedd072ccbdafc487de56e2a3c1d92f2abe Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 21 Feb 2016 23:07:26 +0100 Subject: [PATCH 149/195] Update README for gdrive 2 --- README.md | 790 +++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 668 insertions(+), 122 deletions(-) diff --git a/README.md b/README.md index 01f76970..02828f9a 100644 --- a/README.md +++ b/README.md @@ -3,18 +3,25 @@ gdrive ## Overview -gdrive is a command line utility for uploading and downloading single files to your Google Drive. -This tool on its own does not do synchronization of any kind, if you want that you can use googles own tool. -It is meant for one-off uploads or downloads and integration with other unix tools. One use-case could be -daily uploads of a backup archive for off-site storage. 
+gdrive is a command line utility for uploading and downloading files to your Google Drive. +It's main goal is for automating backups, but it also has basic sync functionality. +Works on most platforms. ## Prerequisites None, binaries are statically linked. -If you want to compile from source you need the go toolchain: http://golang.org/doc/install +If you want to compile from source you need the [go toolchain](http://golang.org/doc/install). ## Installation -- Save the 'drive' binary to a location in your PATH (i.e. `/usr/local/bin/`) -- Or compile it yourself `go build drive.go` +Save the `gdrive` binary to a location in your PATH (i.e. `/usr/local/bin/`) +The first time gdrive is launched, you will be prompted for a verification code. +The code is obtained by following the printed url and authenticating with the +google account for the drive you want access to. This will create a token file +inside the .gdrive folder in your home directory. Note that anyone with access +to this file will also have access to your google drive. +If you want to manage multiple drives you can use the global `--config` flag +or set the environment variable `GDRIVE_CONFIG_DIR`. +Example: `GDRIVE_CONFIG_DIR="/home/user/.gdrive-secondary" gdrive list` +You will be prompted for a new verification code if the folder does not exist. ### Downloads - [drive-freebsd-386 v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnNkdVSU1oNUoyQ0U) @@ -28,122 +35,661 @@ If you want to compile from source you need the go toolchain: http://golang.org/ - [drive-windows-386.exe v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnTXlSc1FqV1dvSTQ) - [drive-windows-x64.exe v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnZ3gyeGw4d3ozbUk) +## Compile from source +```bash +git clone https://github.com/prasmussen/gdrive.git +cd gdrive +go get ./... +go build -o gdrive +``` + +## Gdrive 2 +Gdrive 2 is more or less a full rewrite and is not backwards compatible +with gdrive 1 as all the command line arguments has changed slightly. +Gdrive 2 uses version 3 of the google drive api and my google-api-go-client +fork is no longer needed. + +### Syncing +Gdrive 2 supports basic syncing. It only syncs one way at the time and works +more like rsync than e.g. dropbox. Files that are synced to google drive +are tagged with an appProperty so that the files on drive can be traversed +faster. This means that you can't upload files with `gdrive upload` into +a sync directory as the files would be missing the sync tag, and would be +ignored by the sync commands. +The current implementation is slow and uses a lot of memory if you are +syncing many files. Currently only one file is uploaded at the time, +the speed can be improved in the future by uploading several files concurrently. +To learn more see usage and the examples below. + +#### .gdriveignore +Placing a .gdriveignore in the root of your sync directory can be used to +skip certain files from being synced. .gdriveignore follows the same +rules as [.gitignore](https://git-scm.com/docs/gitignore). + + ## Usage - drive [global options] [verb options] - -#### Options - Global options: - -a, --advanced Advanced Mode -- lets you specify your own oauth client id and secret on setup - -c, --config Set application path where config and token is stored. 
Defaults to ~/.gdrive - -v, --version Print version - -h, --help Show this help - - Verbs: - delete: - -i, --id File Id (*) - download: - -i, --id File Id (*) - --format Download file in a specified format (needed for google docs) - -s, --stdout Write file content to stdout - --force Overwrite existing file - --pop Download latest file, and remove it from google drive - folder: - -t, --title Folder to create (*) - -p, --parent Parent Id of the folder - --share Share created folder - info: - -i, --id File Id (*) - --bytes Show size in bytes - list: - -m, --max Max results - --include-docs Include google docs in listing - -t, --title Title filter - -q, --query Query (see https://developers.google.com/drive/search-parameters) - -s, --shared Show shared status (Note: this will generate 1 http req per file) - -n, --noheader Do not show the header - --bytes Show size in bytes - quota: - --bytes Show size in bytes - share: - -i, --id File Id (*) - unshare: - -i, --id File Id (*) - upload: - -f, --file File or directory to upload (*) - -s, --stdin Use stdin as file content (*) - -t, --title Title to give uploaded file. Defaults to filename - -p, --parent Parent Id of the file - --share Share uploaded file - --mimetype The MIME type (default will try to figure it out) - --convert File will be converted to Google Docs format - -C, --chunksize Set chunk size in bytes. Minimum is 262144, default is 4194304. Recommended to be a power of two. - url: - -i, --id File Id (*) - -p, --preview Generate preview url (default) - -d, --download Generate download url +``` +gdrive [global] list [options] List files +gdrive [global] download [options] Download file or directory +gdrive [global] download query [options] Download all files and directories matching query +gdrive [global] upload [options] Upload file or directory +gdrive [global] upload - [options] Upload file from stdin +gdrive [global] update [options] Update file, this creates a new revision of the file +gdrive [global] info [options] Show file info +gdrive [global] mkdir [options] Create directory +gdrive [global] share [options] Share file or directory +gdrive [global] share list List files permissions +gdrive [global] share revoke Revoke permission +gdrive [global] delete [options] Delete file or directory +gdrive [global] sync list [options] List all syncable directories on drive +gdrive [global] sync content [options] List content of syncable directory +gdrive [global] sync download [options] Sync drive directory to local directory +gdrive [global] sync upload [options] Sync local directory to drive +gdrive [global] changes [options] List file changes +gdrive [global] revision list [options] List file revisions +gdrive [global] revision download [options] Download revision +gdrive [global] revision delete Delete file revision +gdrive [global] import [options] Upload and convert file to a google document, see 'about import' for available conversions +gdrive [global] export [options] Export a google document +gdrive [global] about [options] Google drive metadata, quota usage +gdrive [global] about import Show supported import formats +gdrive [global] about export Show supported export formats +gdrive version Print application version +gdrive help Print help +gdrive help Print command help +gdrive help Print subcommand help +``` + +#### List files +``` +gdrive [global] list [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + 
--access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + -m, --max Max files to list, default: 30 + -q, --query Default query: "trashed = false and 'me' in owners". See https://developers.google.com/drive/search-parameters + --order Sort order. See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy + --name-width Width of name column, default: 40, minimum: 9, use 0 for full width + --absolute Show absolute path to file (will only show path from first parent) + --no-header Dont print the header + --bytes Size in bytes +``` + +#### Download file or directory +``` +gdrive [global] download [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + -f, --force Overwrite existing file + -r, --recursive Download directory recursively, documents will be skipped + --path Download path + --delete Delete remote file when download is successful + --no-progress Hide progress + --stdout Write file content to stdout +``` + +#### Download all files and directories matching query +``` +gdrive [global] download query [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + -f, --force Overwrite existing file + -r, --recursive Download directories recursively, documents will be skipped + --path Download path + --no-progress Hide progress +``` + +#### Upload file or directory +``` +gdrive [global] upload [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + -r, --recursive Upload directory recursively + -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents + --name Filename + --no-progress Hide progress + --mime Force mime type + --share Share file + --delete Delete local file when upload is successful + --chunksize Set chunk size in bytes, default: 8388608 +``` + +#### Upload file from stdin +``` +gdrive [global] upload - [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents + --chunksize Set chunk size in bytes, default: 8388608 + --mime Force mime type + --share Share file + --no-progress Hide progress +``` + +#### Update file, this creates a new revision of the file +``` +gdrive [global] update [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests 
because of short lifetime (for advanced users) + +options: + -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents + --name Filename + --no-progress Hide progress + --mime Force mime type + --chunksize Set chunk size in bytes, default: 8388608 +``` + +#### Show file info +``` +gdrive [global] info [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + --bytes Show size in bytes +``` + +#### Create directory +``` +gdrive [global] mkdir [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + -p, --parent Parent id of created directory, can be specified multiple times to give many parents +``` + +#### Share file or directory +``` +gdrive [global] share [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + --role Share role: owner/writer/commenter/reader, default: reader + --type Share type: user/group/domain/anyone, default: anyone + --email The email address of the user or group to share the file with. Requires 'user' or 'group' as type + --discoverable Make file discoverable by search engines + --revoke Delete all sharing permissions (owner roles will be skipped) +``` + +#### List files permissions +``` +gdrive [global] share list + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) +``` + +#### Revoke permission +``` +gdrive [global] share revoke + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) +``` + +#### Delete file or directory +``` +gdrive [global] delete [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + -r, --recursive Delete directory and all it's content +``` + +#### List all syncable directories on drive +``` +gdrive [global] sync list [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + --no-header Dont print the header +``` + +#### List content of syncable directory +``` +gdrive [global] sync content [options] + +global: 
+ -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + --order Sort order. See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy + --path-width Width of path column, default: 60, minimum: 9, use 0 for full width + --no-header Dont print the header + --bytes Size in bytes +``` + +#### Sync drive directory to local directory +``` +gdrive [global] sync download [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + --keep-remote Keep remote file when a conflict is encountered + --keep-local Keep local file when a conflict is encountered + --keep-largest Keep largest file when a conflict is encountered + --delete-extraneous Delete extraneous local files + --dry-run Show what would have been transferred + --no-progress Hide progress +``` + +#### Sync local directory to drive +``` +gdrive [global] sync upload [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + --keep-remote Keep remote file when a conflict is encountered + --keep-local Keep local file when a conflict is encountered + --keep-largest Keep largest file when a conflict is encountered + --delete-extraneous Delete extraneous remote files + --dry-run Show what would have been transferred + --no-progress Hide progress + --chunksize Set chunk size in bytes, default: 8388608 +``` + +#### List file changes +``` +gdrive [global] changes [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + -m, --max Max changes to list, default: 100 + --since Page token to start listing changes from + --now Get latest page token + --name-width Width of name column, default: 40, minimum: 9, use 0 for full width + --no-header Dont print the header +``` + +#### List file revisions +``` +gdrive [global] revision list [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + --name-width Width of name column, default: 40, minimum: 9, use 0 for full width + --no-header Dont print the header + --bytes Size in bytes +``` + +#### Download revision +``` +gdrive [global] revision download [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + -f, --force Overwrite existing file + 
--no-progress Hide progress + --stdout Write file content to stdout + --path Download path +``` + +#### Delete file revision +``` +gdrive [global] revision delete + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) +``` + +#### Upload and convert file to a google document, see 'about import' for available conversions +``` +gdrive [global] import [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents + --no-progress Hide progress +``` + +#### Export a google document +``` +gdrive [global] export [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + -f, --force Overwrite existing file + --mime Mime type of exported file + --print-mimes Print available mime types for given file +``` + +#### Google drive metadata, quota usage +``` +gdrive [global] about [options] + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + +options: + --bytes Show size in bytes +``` + +#### Show supported import formats +``` +gdrive [global] about import + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) +``` + +#### Show supported export formats +``` +gdrive [global] about export + +global: + -c, --config Application path, default: /Users/pii/.gdrive + --refresh-token Oauth refresh token used to get access token (for advanced users) + --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) +``` + ## Examples -###### List files - $ drive list - Id Title Size Created - 0B3X9GlR6EmbnenBYSFI4MzN0d2M drive-freebsd-amd64 5 MB 2013-01-01 21:57:01 - 0B3X9GlR6EmbnOVRQN0t6RkxVQk0 drive-windows-amd64.exe 5 MB 2013-01-01 21:56:41 - 0B3X9GlR6Embnc1BtVVU1ZHp2UjQ drive-linux-arm 4 MB 2013-01-01 21:57:23 - 0B3X9GlR6EmbnU0ZnbGV4dlk1T00 drive-linux-amd64 5 MB 2013-01-01 21:55:06 - 0B3X9GlR6EmbncTk1TXlMdjd1ODQ drive-darwin-amd64 5 MB 2013-01-01 21:53:34 - -###### Upload file or directory - $ drive upload --file drive-linux-amd64 - Id: 0B3X9GlR6EmbnU0ZnbGV4dlk1T00 - Title: drive-linux-amd64 - Size: 5 MB - Created: 2013-01-01 21:55:06 - Modified: 2013-01-01 21:55:06 - Owner: Petter Rasmussen - Md5sum: 334ad48f6e64646071f302275ce19a94 - Shared: False - Uploaded 'drive-linux-amd64' at 510 KB/s, total 5 MB - -###### Download file - $ drive download --id 0B3X9GlR6EmbnenBYSFI4MzN0d2M 
- Downloaded 'drive-freebsd-amd64' at 2 MB/s, total 5 MB - -###### Download google doc as docx - $ drive download -i 1ooNQwHdIRNcm-boOU7RVH8mQKhoOJ8rgABWBXleytg0 --format docx - Downloaded 'testdoc.docx' at 4.3 KB/s, total 4.3 KB - -###### Share a file - $ drive share --id 0B3X9GlR6EmbnOVRQN0t6RkxVQk0 - File 'drive-windows-amd64.exe' is now readable by everyone @ https://drive.google.com/uc?id=0B3X9GlR6EmbnOVRQN0t6RkxVQk0 - -###### Pipe content directly to your drive - $ echo "Hello World" | drive upload --stdin --title hello.txt - Id: 0B3X9GlR6EmbnVHlHZWZCZVJ4eGs - Title: hello.txt - Size: 12 B - Created: 2013-01-01 22:05:44 - Modified: 2013-01-01 22:05:43 - Owner: Petter Rasmussen - Md5sum: e59ff97941044f85df5297e1c302d260 - Shared: False - Uploaded 'hello.txt' at 6 B/s, total 12 B - -###### Print file to stdout - $ drive download --stdout --id 0B3X9GlR6EmbnVHlHZWZCZVJ4eGs - Hello World - -###### Get file info - $ drive info --id 0B3X9GlR6EmbnVHlHZWZCZVJ4eGs - Id: 0B3X9GlR6EmbnVHlHZWZCZVJ4eGs - Title: hello.txt - Size: 12 B - Created: 2013-01-01 22:05:44 - Modified: 2013-01-01 22:05:43 - Owner: Petter Rasmussen - Md5sum: e59ff97941044f85df5297e1c302d260 - Shared: False - -###### Get a url to the file - $ drive url --id 0B3X9GlR6EmbnVHlHZWZCZVJ4eGs - https://drive.google.com/uc?id=0B3X9GlR6EmbnVHlHZWZCZVJ4eGs +#### List files +``` +$ gdrive list +Id Name Type Size Created +0B3X9GlR6EmbnZ3gyeGw4d3ozbUk drive-windows-x64.exe bin 6.6 MB 2015-07-18 16:43:58 +0B3X9GlR6EmbnTXlSc1FqV1dvSTQ drive-windows-386.exe bin 5.2 MB 2015-07-18 16:43:53 +0B3X9GlR6EmbnVjIzMDRqck1aekE drive-osx-x64 bin 6.5 MB 2015-07-18 16:43:50 +0B3X9GlR6EmbnbEpXdlhza25zT1U drive-osx-386 bin 5.2 MB 2015-07-18 16:43:41 +0B3X9GlR6Embnb095MGxEYmJhY2c drive-linux-x64 bin 6.5 MB 2015-07-18 16:43:38 +``` + +#### Upload file +``` +$ gdrive upload gdrive-osx-x64 +Uploading gdrive-osx-x64 +Uploaded 0B3X9GlR6EmbnZ1NGS25FdEVlWEk at 3.8 MB/s, total 8.3 MB +``` + +#### Make directory +``` +$ gdrive mkdir gdrive-bin +Directory 0B3X9GlR6EmbnY1RLVTk5VUtOVkk created +``` + +#### Upload file to directory +``` +$ gdrive upload --parent 0B3X9GlR6EmbnY1RLVTk5VUtOVkk gdrive-osx-x64 +Uploading gdrive-osx-x64 +Uploaded 0B3X9GlR6EmbnNTk0SkV0bm5Hd0E at 2.5 MB/s, total 8.3 MB +``` + +#### Download file +``` +$ gdrive download 0B3X9GlR6EmbnZ1NGS25FdEVlWEk +Downloading gdrive-osx-x64 -> gdrive-osx-x64 +Downloaded 0B3X9GlR6EmbnZ1NGS25FdEVlWEk at 8.3 MB/s, total 8.3 MB +``` + +#### Share a file +``` +$ gdrive share 0B3X9GlR6EmbnNTk0SkV0bm5Hd0E +Granted reader permission to anyone +``` + +#### Pipe content directly to google drive +``` +$ echo "Hello World" | gdrive upload - hello.txt +Uploading hello.txt +Uploaded 0B3X9GlR6EmbnaXVrOUpIcWlUS0E at 8.0 B/s, total 12.0 B +``` + +#### Print file to stdout +``` +$ gdrive download --stdout 0B3X9GlR6EmbnaXVrOUpIcWlUS0E +Hello World +``` + +#### Get file info +``` +$ gdrive info 0B3X9GlR6EmbnNTk0SkV0bm5Hd0E +Id: 0B3X9GlR6EmbnNTk0SkV0bm5Hd0E +Name: gdrive-osx-x64 +Path: gdrive-bin/gdrive-osx-x64 +Mime: application/octet-stream +Size: 8.3 MB +Created: 2016-02-21 20:47:04 +Modified: 2016-02-21 20:47:04 +Md5sum: b607f29231a3b2d16098c4212516470f +Shared: True +Parents: 0B3X9GlR6EmbnY1RLVTk5VUtOVkk +ViewUrl: https://drive.google.com/file/d/0B3X9GlR6EmbnNTk0SkV0bm5Hd0E/view?usp=drivesdk +DownloadUrl: https://docs.google.com/uc?id=0B3X9GlR6EmbnNTk0SkV0bm5Hd0E&export=download +``` + +#### Update file (create new revision) +``` +$ gdrive update 0B3X9GlR6EmbnNTk0SkV0bm5Hd0E gdrive-osx-x64 +Uploading gdrive-osx-x64 +Updated 
0B3X9GlR6EmbnNTk0SkV0bm5Hd0E at 2.0 MB/s, total 8.3 MB +``` + +#### List file revisions +``` +$ gdrive revision list 0B3X9GlR6EmbnNTk0SkV0bm5Hd0E +Id Name Size Modified KeepForever +0B3X9GlR6EmbnOFlHSTZQNWJWMGN2ckZucC9VaEUwczV1cUNrPQ gdrive-osx-x64 8.3 MB 2016-02-21 20:47:04 False +0B3X9GlR6EmbndVEwMlZCUldGWUlPb2lTS25rOFo1L2t6c2ZVPQ gdrive-osx-x64 8.3 MB 2016-02-21 21:12:09 False +``` + +#### Download revision +``` +$ gdrive revision download 0B3X9GlR6EmbnNTk0SkV0bm5Hd0E 0B3X9GlR6EmbnOFlHSTZQNWJWMGN2ckZucC9VaEUwczV1cUNrPQ +Downloading gdrive-osx-x64 -> gdrive-osx-x64 +Download complete, rate: 8.3 MB/s, total size: 8.3 MB +``` + +#### Export google doc as docx +``` +$ gdrive export --mime application/vnd.openxmlformats-officedocument.wordprocessingml.document 1Kt5A8X7X2RQrEi5t6Y9W1LayRc4hyrFiG63y2dIJEvk +Exported 'foo.docx' with mime type: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' +``` + +#### Import csv as google spreadsheet +``` +$ gdrive import foo.csv +Imported 1mTl3DjIvap4tpTX_oMkDcbDT8ShtiGJRlozTfkXpeko with mime type: 'application/vnd.google-apps.spreadsheet' +``` + +#### Syncing directory to drive +``` +# Create directory on drive +$ gdrive mkdir drive-bin +Directory 0B3X9GlR6EmbnOEd6cEh6bU9XZWM created + +# Sync to drive +$ gdrive sync upload _release/bin 0B3X9GlR6EmbnOEd6cEh6bU9XZWM +Starting sync... +Collecting local and remote file information... +Found 32 local files and 0 remote files + +6 remote directories are missing +[0001/0006] Creating directory drive-bin/bsd +[0002/0006] Creating directory drive-bin/linux +[0003/0006] Creating directory drive-bin/osx +[0004/0006] Creating directory drive-bin/plan9 +[0005/0006] Creating directory drive-bin/solaris +[0006/0006] Creating directory drive-bin/windows + +26 remote files are missing +[0001/0026] Uploading bsd/gdrive-dragonfly-x64 -> drive-bin/bsd/gdrive-dragonfly-x64 +[0002/0026] Uploading bsd/gdrive-freebsd-386 -> drive-bin/bsd/gdrive-freebsd-386 +[0003/0026] Uploading bsd/gdrive-freebsd-arm -> drive-bin/bsd/gdrive-freebsd-arm +[0004/0026] Uploading bsd/gdrive-freebsd-x64 -> drive-bin/bsd/gdrive-freebsd-x64 +[0005/0026] Uploading bsd/gdrive-netbsd-386 -> drive-bin/bsd/gdrive-netbsd-386 +[0006/0026] Uploading bsd/gdrive-netbsd-arm -> drive-bin/bsd/gdrive-netbsd-arm +[0007/0026] Uploading bsd/gdrive-netbsd-x64 -> drive-bin/bsd/gdrive-netbsd-x64 +[0008/0026] Uploading bsd/gdrive-openbsd-386 -> drive-bin/bsd/gdrive-openbsd-386 +[0009/0026] Uploading bsd/gdrive-openbsd-arm -> drive-bin/bsd/gdrive-openbsd-arm +[0010/0026] Uploading bsd/gdrive-openbsd-x64 -> drive-bin/bsd/gdrive-openbsd-x64 +[0011/0026] Uploading linux/gdrive-linux-386 -> drive-bin/linux/gdrive-linux-386 +[0012/0026] Uploading linux/gdrive-linux-arm -> drive-bin/linux/gdrive-linux-arm +[0013/0026] Uploading linux/gdrive-linux-arm64 -> drive-bin/linux/gdrive-linux-arm64 +[0014/0026] Uploading linux/gdrive-linux-mips64 -> drive-bin/linux/gdrive-linux-mips64 +[0015/0026] Uploading linux/gdrive-linux-mips64le -> drive-bin/linux/gdrive-linux-mips64le +[0016/0026] Uploading linux/gdrive-linux-ppc64 -> drive-bin/linux/gdrive-linux-ppc64 +[0017/0026] Uploading linux/gdrive-linux-ppc64le -> drive-bin/linux/gdrive-linux-ppc64le +[0018/0026] Uploading linux/gdrive-linux-x64 -> drive-bin/linux/gdrive-linux-x64 +[0019/0026] Uploading osx/gdrive-osx-386 -> drive-bin/osx/gdrive-osx-386 +[0020/0026] Uploading osx/gdrive-osx-arm -> drive-bin/osx/gdrive-osx-arm +[0021/0026] Uploading osx/gdrive-osx-x64 -> drive-bin/osx/gdrive-osx-x64 +[0022/0026] 
Uploading plan9/gdrive-plan9-386 -> drive-bin/plan9/gdrive-plan9-386 +[0023/0026] Uploading plan9/gdrive-plan9-x64 -> drive-bin/plan9/gdrive-plan9-x64 +[0024/0026] Uploading solaris/gdrive-solaris-x64 -> drive-bin/solaris/gdrive-solaris-x64 +[0025/0026] Uploading windows/gdrive-windows-386.exe -> drive-bin/windows/gdrive-windows-386.exe +[0026/0026] Uploading windows/gdrive-windows-x64.exe -> drive-bin/windows/gdrive-windows-x64.exe +Sync finished in 1m18.891946279s + +# Add new local file +$ echo "google drive binaries" > _release/bin/readme.txt + +# Sync again +$ gdrive sync upload _release/bin 0B3X9GlR6EmbnOEd6cEh6bU9XZWM +Starting sync... +Collecting local and remote file information... +Found 33 local files and 32 remote files + +1 remote files are missing +[0001/0001] Uploading readme.txt -> drive-bin/readme.txt +Sync finished in 2.201339535s + +# Modify local file +$ echo "for all platforms" >> _release/bin/readme.txt + +# Sync again +$ gdrive sync upload _release/bin 0B3X9GlR6EmbnOEd6cEh6bU9XZWM +Starting sync... +Collecting local and remote file information... +Found 33 local files and 33 remote files + +1 local files has changed +[0001/0001] Updating readme.txt -> drive-bin/readme.txt +Sync finished in 1.890244258s +``` +#### List content of sync directory +``` +$ gdrive sync content 0B3X9GlR6EmbnOEd6cEh6bU9XZWM +Id Path Type Size Modified +0B3X9GlR6EmbnMldxMFV1UGVMTlE bsd dir 2016-02-21 22:54:00 +0B3X9GlR6EmbnM05sQ3hVUnJnOXc bsd/gdrive-dragonfly-x64 bin 7.8 MB 2016-02-21 22:54:14 +0B3X9GlR6EmbnVy1KXzA4dlU5RVE bsd/gdrive-freebsd-386 bin 6.1 MB 2016-02-21 22:54:18 +0B3X9GlR6Embnb29QQkFtSlRiZnc bsd/gdrive-freebsd-arm bin 6.1 MB 2016-02-21 22:54:20 +0B3X9GlR6EmbnMkFQYVpSaHhHTXM bsd/gdrive-freebsd-x64 bin 7.8 MB 2016-02-21 22:54:23 +0B3X9GlR6EmbnVmJRMl9hUDloVU0 bsd/gdrive-netbsd-386 bin 6.1 MB 2016-02-21 22:54:25 +0B3X9GlR6EmbnLVlTZWpxOEF4Q2s bsd/gdrive-netbsd-arm bin 6.1 MB 2016-02-21 22:54:28 +0B3X9GlR6EmbnOENUZmh3anJmNG8 bsd/gdrive-netbsd-x64 bin 7.8 MB 2016-02-21 22:54:30 +0B3X9GlR6EmbnWTRoQ2ZVQXRfQlU bsd/gdrive-openbsd-386 bin 6.1 MB 2016-02-21 22:54:32 +0B3X9GlR6EmbncEtlN3ZuQ0VUWms bsd/gdrive-openbsd-arm bin 6.1 MB 2016-02-21 22:54:35 +0B3X9GlR6EmbnMlFLY1ptNEFyZWc bsd/gdrive-openbsd-x64 bin 7.8 MB 2016-02-21 22:54:38 +0B3X9GlR6EmbncGtSajQyNzloVEE linux dir 2016-02-21 22:54:01 +0B3X9GlR6EmbnMWVudkJmb1NZdmM linux/gdrive-linux-386 bin 6.1 MB 2016-02-21 22:54:40 +0B3X9GlR6Embnbnpla1R2VHV5T2M linux/gdrive-linux-arm bin 6.1 MB 2016-02-21 22:54:42 +0B3X9GlR6EmbnM0s2cU1YWkNJSjA linux/gdrive-linux-arm64 bin 7.7 MB 2016-02-21 22:54:45 +0B3X9GlR6EmbnNU9NNi1TdDc4S2c linux/gdrive-linux-mips64 bin 8.5 MB 2016-02-21 22:54:47 +0B3X9GlR6EmbnSmdQNjRKZ2dWV1U linux/gdrive-linux-mips64le bin 8.5 MB 2016-02-21 22:54:50 +0B3X9GlR6EmbnS0g0OVgxMHY5Z3c linux/gdrive-linux-ppc64 bin 7.8 MB 2016-02-21 22:54:52 +0B3X9GlR6EmbneVp6ZXRpR3FhWlU linux/gdrive-linux-ppc64le bin 7.8 MB 2016-02-21 22:54:54 +0B3X9GlR6EmbnczdJT195dFVxdU0 linux/gdrive-linux-x64 bin 7.8 MB 2016-02-21 22:54:57 +0B3X9GlR6EmbnTXZXeDRnSDdVS1E osx dir 2016-02-21 22:54:02 +0B3X9GlR6EmbnWnRheXJNR0pUMU0 osx/gdrive-osx-386 bin 6.6 MB 2016-02-21 22:54:59 +0B3X9GlR6EmbnRzNqMWFXdDR1Rms osx/gdrive-osx-arm bin 6.6 MB 2016-02-21 22:55:01 +0B3X9GlR6EmbnaDlVWTZDd0JIeEU osx/gdrive-osx-x64 bin 8.3 MB 2016-02-21 22:55:04 +0B3X9GlR6EmbnWW84UFBvbHlURXM plan9 dir 2016-02-21 22:54:02 +0B3X9GlR6EmbnTmc0a2RNdDZDRUU plan9/gdrive-plan9-386 bin 5.8 MB 2016-02-21 22:55:07 +0B3X9GlR6EmbnT1pYZ2p4Sk9FVFk plan9/gdrive-plan9-x64 bin 7.4 MB 2016-02-21 22:55:10 
+0B3X9GlR6EmbnbnZnXzlYVHoxdk0 readme.txt bin 40.0 B 2016-02-21 22:59:56 +0B3X9GlR6EmbnSWF1QUlta3RnaGc solaris dir 2016-02-21 22:54:03 +0B3X9GlR6EmbnaWFOV0YxSGs5Znc solaris/gdrive-solaris-x64 bin 7.7 MB 2016-02-21 22:55:13 +0B3X9GlR6EmbnNE5ySkEzbWQ4Qms windows dir 2016-02-21 22:54:03 +0B3X9GlR6EmbnX1RIT2w1TWZYWFU windows/gdrive-windows-386.exe bin 6.1 MB 2016-02-21 22:55:15 +0B3X9GlR6EmbndmVMU05POGRPS3c windows/gdrive-windows-x64.exe bin 7.8 MB 2016-02-21 22:55:18 +``` From a7256ab212bdc61e9b681210dd0413299f31db1e Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 22 Feb 2016 01:05:39 +0100 Subject: [PATCH 150/195] Replace username --- README.md | 50 ++++++++++++++++---------------- _release/print_usage_markdown.sh | 2 +- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index 02828f9a..08d2b071 100644 --- a/README.md +++ b/README.md @@ -105,7 +105,7 @@ gdrive help Print subcommand gdrive [global] list [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -124,7 +124,7 @@ options: gdrive [global] download [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -142,7 +142,7 @@ options: gdrive [global] download query [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -158,7 +158,7 @@ options: gdrive [global] upload [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -178,7 +178,7 @@ options: gdrive [global] upload - [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -195,7 +195,7 @@ options: gdrive [global] update [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -212,7 +212,7 @@ options: gdrive [global] info [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced 
users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -225,7 +225,7 @@ options: gdrive [global] mkdir [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -238,7 +238,7 @@ options: gdrive [global] share [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -255,7 +255,7 @@ options: gdrive [global] share list global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) ``` @@ -265,7 +265,7 @@ global: gdrive [global] share revoke global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) ``` @@ -275,7 +275,7 @@ global: gdrive [global] delete [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -288,7 +288,7 @@ options: gdrive [global] sync list [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -301,7 +301,7 @@ options: gdrive [global] sync content [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -317,7 +317,7 @@ options: gdrive [global] sync download [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -335,7 +335,7 @@ options: gdrive [global] sync upload [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access 
token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -354,7 +354,7 @@ options: gdrive [global] changes [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -371,7 +371,7 @@ options: gdrive [global] revision list [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -386,7 +386,7 @@ options: gdrive [global] revision download [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -402,7 +402,7 @@ options: gdrive [global] revision delete global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) ``` @@ -412,7 +412,7 @@ global: gdrive [global] import [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -426,7 +426,7 @@ options: gdrive [global] export [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -441,7 +441,7 @@ options: gdrive [global] about [options] global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) @@ -454,7 +454,7 @@ options: gdrive [global] about import global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) ``` @@ -464,7 +464,7 @@ global: gdrive [global] about export global: - -c, --config Application path, default: /Users/pii/.gdrive + -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token 
used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) ``` diff --git a/_release/print_usage_markdown.sh b/_release/print_usage_markdown.sh index e0cbc890..f81ad08f 100755 --- a/_release/print_usage_markdown.sh +++ b/_release/print_usage_markdown.sh @@ -12,6 +12,6 @@ help=$(gdrive help | grep global | sed -E 's/ \[[^]]+\]//g' | sed -E 's/ <[^>]+> for args in $help; do cmd="gdrive help $args" echo - eval $cmd | sed -e '1s/^/#### /' | sed -e $'1s/$/\\\n```/' + eval $cmd | sed -e '1s/^/#### /' | sed -e $'1s/$/\\\n```/' | sed -e 's/pii//' echo '```' done From cabd73299fade0ff2fdc3732ebece151e9edf058 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 22 Feb 2016 20:46:53 +0100 Subject: [PATCH 151/195] Add raspberry pi build --- _release/build-all.sh | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/_release/build-all.sh b/_release/build-all.sh index 7f05036f..e858599b 100755 --- a/_release/build-all.sh +++ b/_release/build-all.sh @@ -1,7 +1,7 @@ #!/bin/bash APP_NAME="gdrive" -PLATFORMS="darwin/386 darwin/amd64 darwin/arm darwin/arm64 dragonfly/amd64 freebsd/386 freebsd/amd64 freebsd/arm linux/386 linux/amd64 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/mips64 linux/mips64le netbsd/386 netbsd/amd64 netbsd/arm openbsd/386 openbsd/amd64 openbsd/arm plan9/386 plan9/amd64 solaris/amd64 windows/386 windows/amd64" +PLATFORMS="darwin/386 darwin/amd64 darwin/arm darwin/arm64 dragonfly/amd64 freebsd/386 freebsd/amd64 freebsd/arm linux/386 linux/amd64 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/mips64 linux/mips64le linux/rpi netbsd/386 netbsd/amd64 netbsd/arm openbsd/386 openbsd/amd64 openbsd/arm plan9/386 plan9/amd64 solaris/amd64 windows/386 windows/amd64" BIN_PATH="_release/bin" @@ -19,8 +19,16 @@ for PLATFORM in $PLATFORMS; do BIN_NAME="${BIN_NAME}.exe" fi + # Raspberrypi seems to need arm5 binaries + if [ $GOARCH == "rpi" ]; then + export GOARM=5 + GOARCH="arm" + else + unset GOARM + fi + export GOOS=$GOOS - export GOARCH=$GOARCH go build + export GOARCH=$GOARCH echo "Building $BIN_NAME" go build -ldflags '-w -s' -o ${BIN_PATH}/${BIN_NAME} From 4a9815ed8ddf89a90c51ceaa41fe00b1db3b99ba Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 22 Feb 2016 21:55:40 +0100 Subject: [PATCH 152/195] Add largest files example --- README.md | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 08d2b071..cbe17894 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,7 @@ gdrive ## Overview -gdrive is a command line utility for uploading and downloading files to your Google Drive. -It's main goal is for automating backups, but it also has basic sync functionality. -Works on most platforms. +gdrive is a command line utility for interacting with Google Drive. ## Prerequisites None, binaries are statically linked. 
@@ -482,6 +480,15 @@ Id Name Type Size Created 0B3X9GlR6Embnb095MGxEYmJhY2c drive-linux-x64 bin 6.5 MB 2015-07-18 16:43:38 ``` +#### List largest files +``` +$ gdrive list --query "name contains 'gdrive'" --order "quotaBytesUsed desc" -m 3 +Id Name Type Size Created +0B3X9GlR6EmbnZXpDRG1xblM2LTg gdrive-linux-mips64 bin 8.5 MB 2016-02-22 21:07:04 +0B3X9GlR6EmbnNW5CTV8xdFkxTjg gdrive-linux-mips64le bin 8.5 MB 2016-02-22 21:07:07 +0B3X9GlR6EmbnZ1NGS25FdEVlWEk gdrive-osx-x64 bin 8.3 MB 2016-02-21 20:22:13 +``` + #### Upload file ``` $ gdrive upload gdrive-osx-x64 From 34e963defda7714b41a6eee459692cf144ed4c4c Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Mon, 22 Feb 2016 22:18:41 +0100 Subject: [PATCH 153/195] Update installation instructions --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index cbe17894..1bc3bdff 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,8 @@ None, binaries are statically linked. If you want to compile from source you need the [go toolchain](http://golang.org/doc/install). ## Installation -Save the `gdrive` binary to a location in your PATH (i.e. `/usr/local/bin/`) +Download `gdrive` from one of the links below. On unix systems +run `chmod +x gdrive` after download to make the binary executable. The first time gdrive is launched, you will be prompted for a verification code. The code is obtained by following the printed url and authenticating with the google account for the drive you want access to. This will create a token file From 342082f849f60c65c814ee9fc64bf96f9815be59 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 23 Feb 2016 00:12:31 +0100 Subject: [PATCH 154/195] Update upload script --- _release/upload.sh | 104 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 91 insertions(+), 13 deletions(-) diff --git a/_release/upload.sh b/_release/upload.sh index ec66841c..8832ef83 100755 --- a/_release/upload.sh +++ b/_release/upload.sh @@ -1,22 +1,100 @@ -#!/bin/bash +#!/usr/local/bin/bash + +# Grab application version +VERSION=$(_release/bin/gdrive-osx-x64 version | awk 'NR==1 {print $2}') + +declare -a filenames +filenames=( + "gdrive-osx-x64" + "gdrive-osx-386" + "gdrive-osx-arm" + "gdrive-linux-x64" + "gdrive-linux-386" + "gdrive-linux-rpi" + "gdrive-linux-arm64" + "gdrive-linux-arm" + "gdrive-linux-mips64" + "gdrive-linux-mips64le" + "gdrive-linux-ppc64" + "gdrive-linux-ppc64le" + "gdrive-windows-386.exe" + "gdrive-windows-x64.exe" + "gdrive-dragonfly-x64" + "gdrive-freebsd-x64" + "gdrive-freebsd-386" + "gdrive-freebsd-arm" + "gdrive-netbsd-x64" + "gdrive-netbsd-386" + "gdrive-netbsd-arm" + "gdrive-openbsd-x64" + "gdrive-openbsd-386" + "gdrive-openbsd-arm" + "gdrive-solaris-x64" + "gdrive-plan9-x64" + "gdrive-plan9-386" +) + +# Note: associative array requires bash 4+ +declare -A descriptions +descriptions=( + ["gdrive-osx-x64"]="OS X 64-bit" + ["gdrive-osx-386"]="OS X 32-bit" + ["gdrive-osx-arm"]="OS X arm" + ["gdrive-linux-x64"]="Linux 64-bit" + ["gdrive-linux-386"]="Linux 32-bit" + ["gdrive-linux-rpi"]="Linux Raspberry Pi" + ["gdrive-linux-arm64"]="Linux arm 64-bit" + ["gdrive-linux-arm"]="Linux arm 32-bit" + ["gdrive-linux-mips64"]="Linux mips 64-bit" + ["gdrive-linux-mips64le"]="Linux mips 64-bit le" + ["gdrive-linux-ppc64"]="Linux PPC 64-bit" + ["gdrive-linux-ppc64le"]="Linux PPC 64-bit le" + ["gdrive-windows-386.exe"]="Window 32-bit" + ["gdrive-windows-x64.exe"]="Windows 64-bit" + ["gdrive-dragonfly-x64"]="DragonFly BSD 64-bit" + ["gdrive-freebsd-x64"]="FreeBSD 64-bit" 
+ ["gdrive-freebsd-386"]="FreeBSD 32-bit" + ["gdrive-freebsd-arm"]="FreeBSD arm" + ["gdrive-netbsd-x64"]="NetBSD 64-bit" + ["gdrive-netbsd-386"]="NetBSD 32-bit" + ["gdrive-netbsd-arm"]="NetBSD arm" + ["gdrive-openbsd-x64"]="OpenBSD 64-bit" + ["gdrive-openbsd-386"]="OpenBSD 32-bit" + ["gdrive-openbsd-arm"]="OpenBSD arm" + ["gdrive-solaris-x64"]="Solaris 64-bit" + ["gdrive-plan9-x64"]="Plan9 64-bit" + ["gdrive-plan9-386"]="Plan9 32-bit" +) # Markdown helpers -HEADER='### Downloads' -ROW_TEMPLATE='- [{{name}}]({{url}})' +HEADER='### Downloads +| Filename | Version | Description | Shasum | +|:-----------------------|:--------|:-------------------|:-----------------------------------------|' -# Grab application version -VERSION=$(_release/bin/drive-osx-x64 --version | awk '{print $2}' | sed -e 's/v//') +ROW_TEMPLATE="| [{{name}}]({{url}}) | $VERSION | {{description}} | {{sha}} |" -# Print markdown header + +# Print header echo "$HEADER" -for bin_path in _release/bin/drive-*; do +for name in ${filenames[@]}; do + bin_path="_release/bin/$name" + # Upload file - URL=$(drive upload --file $bin_path --share | awk '/https/ {print $9}') + url=$(gdrive upload --share $bin_path | awk '/https/ {print $7}') + + # Shasum + sha="$(shasum -b $bin_path | awk '{print $1}')" + + # Filename + name="$(basename $bin_path)" + + # Render markdown row + row=${ROW_TEMPLATE//"{{name}}"/$name} + row=${row//"{{url}}"/$url} + row=${row//"{{description}}"/${descriptions[$name]}} + row=${row//"{{sha}}"/$sha} - # Render markdown row and print to screen - NAME="$(basename $bin_path) v${VERSION}" - ROW=${ROW_TEMPLATE//"{{name}}"/$NAME} - ROW=${ROW//"{{url}}"/$URL} - echo "$ROW" + # Print row + echo "$row" done From 47d6a0c67560f00a51eacc1d2d7ce8f2bdc2a55c Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 23 Feb 2016 00:13:39 +0100 Subject: [PATCH 155/195] 2.0 download links --- README.md | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 1bc3bdff..44aa6f02 100644 --- a/README.md +++ b/README.md @@ -23,16 +23,35 @@ Example: `GDRIVE_CONFIG_DIR="/home/user/.gdrive-secondary" gdrive list` You will be prompted for a new verification code if the folder does not exist. 
### Downloads -- [drive-freebsd-386 v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnNkdVSU1oNUoyQ0U) -- [drive-freebsd-x64 v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnZTdTTlM2Y1ViV1E) -- [drive-linux-386 v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnUWZGRmYxVUU2M00) -- [drive-linux-arm v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnajNmMVU1TDdIWmc) -- [drive-linux-rpi v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnTVh2SlQyN1FPM3c) -- [drive-linux-x64 v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6Embnb095MGxEYmJhY2c) -- [drive-osx-386 v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnbEpXdlhza25zT1U) -- [drive-osx-x64 v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnVjIzMDRqck1aekE) -- [drive-windows-386.exe v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnTXlSc1FqV1dvSTQ) -- [drive-windows-x64.exe v1.9.0](https://drive.google.com/uc?id=0B3X9GlR6EmbnZ3gyeGw4d3ozbUk) +| Filename | Version | Description | Shasum | +|:-----------------------|:--------|:-------------------|:-----------------------------------------| +| [gdrive-osx-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbndWh2eWduUXpSeFE&export=download) | 2.0.0 | OS X 64-bit | b4619cb9f8862fb86c5938ee65047d0995b71ea5 | +| [gdrive-osx-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnQkU5MEt1OUZqVnM&export=download) | 2.0.0 | OS X 32-bit | a2fee3ff9f45129110ac98c315a7c4c1ecd877ba | +| [gdrive-osx-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnSzhqdmxMS1NfZmc&export=download) | 2.0.0 | OS X arm | d4912d2840d02aef9d9c07b322a850e624d0f960 | +| [gdrive-linux-x64](https://docs.google.com/uc?id=0B3X9GlR6Embnc245MW5fTjA5ZVE&export=download) | 2.0.0 | Linux 64-bit | fb0ac1ba786ce05918f31892b8a5b4f365067ec3 | +| [gdrive-linux-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnT1QyOUpwbldjVU0&export=download) | 2.0.0 | Linux 32-bit | 79dd6d97871bb7f28389d10e9f8d38540ed5dbbe | +| [gdrive-linux-rpi](https://docs.google.com/uc?id=0B3X9GlR6Embnby1GdGpES2F0ck0&export=download) | 2.0.0 | Linux Raspberry Pi | 2ae7a9864203cdc01951da6a4a11b9fd451287bf | +| [gdrive-linux-arm64](https://docs.google.com/uc?id=0B3X9GlR6EmbnX29jQlZtSzNPS1k&export=download) | 2.0.0 | Linux arm 64-bit | f0e2d3de0efce11fe928f3f1529d4d33d31e6155 | +| [gdrive-linux-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnZ0N1Tm94bzBkU2M&export=download) | 2.0.0 | Linux arm 32-bit | 62024c24275055d2d27aa035a8d67fcb5b5b4186 | +| [gdrive-linux-mips64](https://docs.google.com/uc?id=0B3X9GlR6EmbnT2tLYzYzYWJYeTQ&export=download) | 2.0.0 | Linux mips 64-bit | 9433bdda77a40f0488fa35038975783ec172ac70 | +| [gdrive-linux-mips64le](https://docs.google.com/uc?id=0B3X9GlR6EmbnVTgyR2I0dklCSGs&export=download) | 2.0.0 | Linux mips 64-bit le | 77eeff7470fbfd38082edbf50ecf0ac422d4047e | +| [gdrive-linux-ppc64](https://docs.google.com/uc?id=0B3X9GlR6EmbnNHRCSFE0WGhEMUE&export=download) | 2.0.0 | Linux PPC 64-bit | 250c12252696e88235fbe23202a38ed659ddebc2 | +| [gdrive-linux-ppc64le](https://docs.google.com/uc?id=0B3X9GlR6EmbnTm9PSTdnbDV2Y1k&export=download) | 2.0.0 | Linux PPC 64-bit le | 335831aceaba25fbb0a97724f8b4bbdde03e106e | +| [gdrive-windows-386.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbnSk9ObTV1MXBweGM&export=download) | 2.0.0 | Window 32-bit | 3131d396d40afda901e3967e4505d600bf30c026 | +| [gdrive-windows-x64.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbnTGV6bVkwZFJoQ1k&export=download) | 2.0.0 | Windows 64-bit | 64369fb913e686576ca028bb0e180e0a7d2f9ce2 | +| [gdrive-dragonfly-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnVWh2NjR3UVVybUU&export=download) | 
2.0.0 | DragonFly BSD 64-bit | 6d34e8c6d5e5327a70e0201a9a299224696287cd | +| [gdrive-freebsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnaWtER0hFYXNMOEk&export=download) | 2.0.0 | FreeBSD 64-bit | 7c0b3582601e095498b8fe8199d95405e64f4b37 | +| [gdrive-freebsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnVVBSTVpHajBWT1E&export=download) | 2.0.0 | FreeBSD 32-bit | 2c8abd7a36cfe3c26bfaac04f3f8c517c2ee15a3 | +| [gdrive-freebsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnY1JKdHlyc200WlE&export=download) | 2.0.0 | FreeBSD arm | ff320433a61a297cbb9dcca22ee8c5ae425b5cca | +| [gdrive-netbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnU193SUtIV3lJNUE&export=download) | 2.0.0 | NetBSD 64-bit | 1cf653ee839be342f3442d3bd4df7eceab73184d | +| [gdrive-netbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnWGt1elRlN2hRV0k&export=download) | 2.0.0 | NetBSD 32-bit | 7452be3226db3955688e509e4be0d91d8a24a7a9 | +| [gdrive-netbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnX2J4VWdPLW1xLW8&export=download) | 2.0.0 | NetBSD arm | 6c93d33bbebbced9bddeb6adc95a0ebf5f4e0f2c | +| [gdrive-openbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbndG5xZ21uYWFWYXc&export=download) | 2.0.0 | OpenBSD 64-bit | b62987a0e792f39f977b03f91b33c4b7a9461e07 | +| [gdrive-openbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnQlBHMzdvR3ZhcHc&export=download) | 2.0.0 | OpenBSD 32-bit | 85bdbc4fb4a85fa05e4bc48897d3f5732882365a | +| [gdrive-openbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnRGV2YzNKRFhQaVE&export=download) | 2.0.0 | OpenBSD arm | 2a356bc8310ce79b5b43e5d4328df8f899aa725f | +| [gdrive-solaris-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnNHVSalRqaHUwQTg&export=download) | 2.0.0 | Solaris 64-bit | 48d279de6c35867f096e0e5a79cb136fc4ec6135 | +| [gdrive-plan9-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnN0gwSkFoM0Rfa0E&export=download) | 2.0.0 | Plan9 64-bit | ae938d297ff7c6fb57fd999b81b0af88d62a7a94 | +| [gdrive-plan9-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnRnZTMU1USzM2RE0&export=download) | 2.0.0 | Plan9 32-bit | db16af581cd711b86d27e2777db157a97efaaacc | ## Compile from source ```bash From 78d3a1c6c2ee7f42ca926996cc0632cb540368d1 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 23 Feb 2016 23:49:19 +0100 Subject: [PATCH 156/195] Handle error --- drive/download.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drive/download.go b/drive/download.go index 5fa121d2..0856258b 100644 --- a/drive/download.go +++ b/drive/download.go @@ -40,6 +40,9 @@ func (self *Drive) Download(args DownloadArgs) error { } bytes, rate, err := self.downloadBinary(f, args) + if err != nil { + return err + } if !args.Stdout { fmt.Fprintf(args.Out, "Downloaded %s at %s/s, total %s\n", f.Id, formatSize(rate, false), formatSize(bytes, false)) From 93bce13d22e7c0578cfe613abe011f1ae8e66e45 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 23 Feb 2016 23:50:00 +0100 Subject: [PATCH 157/195] v2.0.1 --- gdrive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gdrive.go b/gdrive.go index c510418d..c9a04f6a 100644 --- a/gdrive.go +++ b/gdrive.go @@ -7,7 +7,7 @@ import ( ) const Name = "gdrive" -const Version = "2.0.0" +const Version = "2.0.1" const DefaultMaxFiles = 30 const DefaultMaxChanges = 100 From 677d980b52d92b010984dfbbba23459ce7237d67 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 24 Feb 2016 00:09:14 +0100 Subject: [PATCH 158/195] 2.0.1 download links --- README.md | 54 +++++++++++++++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 27 
deletions(-) diff --git a/README.md b/README.md index 44aa6f02..e46f1395 100644 --- a/README.md +++ b/README.md @@ -25,33 +25,33 @@ You will be prompted for a new verification code if the folder does not exist. ### Downloads | Filename | Version | Description | Shasum | |:-----------------------|:--------|:-------------------|:-----------------------------------------| -| [gdrive-osx-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbndWh2eWduUXpSeFE&export=download) | 2.0.0 | OS X 64-bit | b4619cb9f8862fb86c5938ee65047d0995b71ea5 | -| [gdrive-osx-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnQkU5MEt1OUZqVnM&export=download) | 2.0.0 | OS X 32-bit | a2fee3ff9f45129110ac98c315a7c4c1ecd877ba | -| [gdrive-osx-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnSzhqdmxMS1NfZmc&export=download) | 2.0.0 | OS X arm | d4912d2840d02aef9d9c07b322a850e624d0f960 | -| [gdrive-linux-x64](https://docs.google.com/uc?id=0B3X9GlR6Embnc245MW5fTjA5ZVE&export=download) | 2.0.0 | Linux 64-bit | fb0ac1ba786ce05918f31892b8a5b4f365067ec3 | -| [gdrive-linux-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnT1QyOUpwbldjVU0&export=download) | 2.0.0 | Linux 32-bit | 79dd6d97871bb7f28389d10e9f8d38540ed5dbbe | -| [gdrive-linux-rpi](https://docs.google.com/uc?id=0B3X9GlR6Embnby1GdGpES2F0ck0&export=download) | 2.0.0 | Linux Raspberry Pi | 2ae7a9864203cdc01951da6a4a11b9fd451287bf | -| [gdrive-linux-arm64](https://docs.google.com/uc?id=0B3X9GlR6EmbnX29jQlZtSzNPS1k&export=download) | 2.0.0 | Linux arm 64-bit | f0e2d3de0efce11fe928f3f1529d4d33d31e6155 | -| [gdrive-linux-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnZ0N1Tm94bzBkU2M&export=download) | 2.0.0 | Linux arm 32-bit | 62024c24275055d2d27aa035a8d67fcb5b5b4186 | -| [gdrive-linux-mips64](https://docs.google.com/uc?id=0B3X9GlR6EmbnT2tLYzYzYWJYeTQ&export=download) | 2.0.0 | Linux mips 64-bit | 9433bdda77a40f0488fa35038975783ec172ac70 | -| [gdrive-linux-mips64le](https://docs.google.com/uc?id=0B3X9GlR6EmbnVTgyR2I0dklCSGs&export=download) | 2.0.0 | Linux mips 64-bit le | 77eeff7470fbfd38082edbf50ecf0ac422d4047e | -| [gdrive-linux-ppc64](https://docs.google.com/uc?id=0B3X9GlR6EmbnNHRCSFE0WGhEMUE&export=download) | 2.0.0 | Linux PPC 64-bit | 250c12252696e88235fbe23202a38ed659ddebc2 | -| [gdrive-linux-ppc64le](https://docs.google.com/uc?id=0B3X9GlR6EmbnTm9PSTdnbDV2Y1k&export=download) | 2.0.0 | Linux PPC 64-bit le | 335831aceaba25fbb0a97724f8b4bbdde03e106e | -| [gdrive-windows-386.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbnSk9ObTV1MXBweGM&export=download) | 2.0.0 | Window 32-bit | 3131d396d40afda901e3967e4505d600bf30c026 | -| [gdrive-windows-x64.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbnTGV6bVkwZFJoQ1k&export=download) | 2.0.0 | Windows 64-bit | 64369fb913e686576ca028bb0e180e0a7d2f9ce2 | -| [gdrive-dragonfly-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnVWh2NjR3UVVybUU&export=download) | 2.0.0 | DragonFly BSD 64-bit | 6d34e8c6d5e5327a70e0201a9a299224696287cd | -| [gdrive-freebsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnaWtER0hFYXNMOEk&export=download) | 2.0.0 | FreeBSD 64-bit | 7c0b3582601e095498b8fe8199d95405e64f4b37 | -| [gdrive-freebsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnVVBSTVpHajBWT1E&export=download) | 2.0.0 | FreeBSD 32-bit | 2c8abd7a36cfe3c26bfaac04f3f8c517c2ee15a3 | -| [gdrive-freebsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnY1JKdHlyc200WlE&export=download) | 2.0.0 | FreeBSD arm | ff320433a61a297cbb9dcca22ee8c5ae425b5cca | -| [gdrive-netbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnU193SUtIV3lJNUE&export=download) | 2.0.0 | NetBSD 
64-bit | 1cf653ee839be342f3442d3bd4df7eceab73184d | -| [gdrive-netbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnWGt1elRlN2hRV0k&export=download) | 2.0.0 | NetBSD 32-bit | 7452be3226db3955688e509e4be0d91d8a24a7a9 | -| [gdrive-netbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnX2J4VWdPLW1xLW8&export=download) | 2.0.0 | NetBSD arm | 6c93d33bbebbced9bddeb6adc95a0ebf5f4e0f2c | -| [gdrive-openbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbndG5xZ21uYWFWYXc&export=download) | 2.0.0 | OpenBSD 64-bit | b62987a0e792f39f977b03f91b33c4b7a9461e07 | -| [gdrive-openbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnQlBHMzdvR3ZhcHc&export=download) | 2.0.0 | OpenBSD 32-bit | 85bdbc4fb4a85fa05e4bc48897d3f5732882365a | -| [gdrive-openbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnRGV2YzNKRFhQaVE&export=download) | 2.0.0 | OpenBSD arm | 2a356bc8310ce79b5b43e5d4328df8f899aa725f | -| [gdrive-solaris-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnNHVSalRqaHUwQTg&export=download) | 2.0.0 | Solaris 64-bit | 48d279de6c35867f096e0e5a79cb136fc4ec6135 | -| [gdrive-plan9-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnN0gwSkFoM0Rfa0E&export=download) | 2.0.0 | Plan9 64-bit | ae938d297ff7c6fb57fd999b81b0af88d62a7a94 | -| [gdrive-plan9-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnRnZTMU1USzM2RE0&export=download) | 2.0.0 | Plan9 32-bit | db16af581cd711b86d27e2777db157a97efaaacc | +| [gdrive-osx-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnZUM0bXRBeUpYQ00&export=download) | 2.0.1 | OS X 64-bit | 180bc98408c7ec6deac6a66bbd9c307c4348ae6f | +| [gdrive-osx-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnM05hQXhvWm9EOHM&export=download) | 2.0.1 | OS X 32-bit | 366ee217d4327a1855245d8c4a1204f4831eb979 | +| [gdrive-osx-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnelYxRU5LbEVfVzQ&export=download) | 2.0.1 | OS X arm | cdc31f83e50560a7f8fbf8a25b9c87c945d1407f | +| [gdrive-linux-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnWksyTEtCM0VfaFE&export=download) | 2.0.1 | Linux 64-bit | c636778c4a2c76e47ac731c142f4219a19c30263 | +| [gdrive-linux-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnbndOUW50ZVllZ3M&export=download) | 2.0.1 | Linux 32-bit | 0968993e4a70a594e0f315034640fd811977e4f1 | +| [gdrive-linux-rpi](https://docs.google.com/uc?id=0B3X9GlR6EmbnbVRIelM5SG5zcmM&export=download) | 2.0.1 | Linux Raspberry Pi | 7865a1e96e70791aa0f33cb758e6cda7886be240 | +| [gdrive-linux-arm64](https://docs.google.com/uc?id=0B3X9GlR6EmbnNkxrQ3VzOGNMNWc&export=download) | 2.0.1 | Linux arm 64-bit | 00b293e4501da64e956d32b7c1589a014b541abe | +| [gdrive-linux-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnLW51cVJrazVLOG8&export=download) | 2.0.1 | Linux arm 32-bit | 3577a6462dafc47823d5ed053a978af84a99c5af | +| [gdrive-linux-mips64](https://docs.google.com/uc?id=0B3X9GlR6EmbnTS1fejFtaDMyLU0&export=download) | 2.0.1 | Linux mips 64-bit | e1e5992a5635467b84f149435544d980e99f30c6 | +| [gdrive-linux-mips64le](https://docs.google.com/uc?id=0B3X9GlR6EmbnV3pJU0ZBTEg0a2M&export=download) | 2.0.1 | Linux mips 64-bit le | 8273d53fc21b6028de781958c6f224f41c0a92db | +| [gdrive-linux-ppc64](https://docs.google.com/uc?id=0B3X9GlR6EmbnOXNPM01ITHl3NUk&export=download) | 2.0.1 | Linux PPC 64-bit | fc1409b9960ae4a209331e40a4cd9b10a789f8e6 | +| [gdrive-linux-ppc64le](https://docs.google.com/uc?id=0B3X9GlR6EmbnRVBCRlpXNl95bW8&export=download) | 2.0.1 | Linux PPC 64-bit le | 7a09ed4b43c31198efdf4e4a7da1e196cd3cd54f | +| [gdrive-windows-386.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbncnpncGpZX0pULUU&export=download) | 2.0.1 | Window 
32-bit | 134480ca113d03b91dbfa43326704b57f07ca547 | +| [gdrive-windows-x64.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbncWNLOS1KYWhLVFE&export=download) | 2.0.1 | Windows 64-bit | c68df6e77aa7fa412bfe318ab270e1245a24966b | +| [gdrive-dragonfly-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnX1FiZzFaN1hRekk&export=download) | 2.0.1 | DragonFly BSD 64-bit | 0116d291a859152a4af842254eb88102c18f9ad6 | +| [gdrive-freebsd-x64](https://docs.google.com/uc?id=0B3X9GlR6Embnck8wR0ozV3J2WHM&export=download) | 2.0.1 | FreeBSD 64-bit | 0ab7d8509efcbbf41a4c684f9942c749706ea8ab | +| [gdrive-freebsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnVUtwX010YUlENWs&export=download) | 2.0.1 | FreeBSD 32-bit | 4e1ac2aeeed59df1d8636641b796aad54ade42d2 | +| [gdrive-freebsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbncXV3SU9NZDc3X3M&export=download) | 2.0.1 | FreeBSD arm | 1220f7f75579b28205d302f1e8ad0faeefc5d188 | +| [gdrive-netbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnUk5JMVFiS0pKZW8&export=download) | 2.0.1 | NetBSD 64-bit | dba9e9f57b6ed3c370961e95efaea1ed0ea4429f | +| [gdrive-netbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnLWZRTy1LX1kxUkE&export=download) | 2.0.1 | NetBSD 32-bit | 8303ec4f0f7ba2acecc4509e0e73f8bf1eb2cd68 | +| [gdrive-netbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnQUNFOWxBYmdjUEk&export=download) | 2.0.1 | NetBSD arm | 84aeb602e0cfbb09a35fff97f2af5990673f9bee | +| [gdrive-openbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnYk50VlVuNUhuaVE&export=download) | 2.0.1 | OpenBSD 64-bit | 729794ef7cfc320cab84fd78e3113f5fdd476ac6 | +| [gdrive-openbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnOExCLVNYakJiSUk&export=download) | 2.0.1 | OpenBSD 32-bit | a1f45252d86ae238ce17b49c226b218aec805a02 | +| [gdrive-openbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnN1htLTBSSElRUDQ&export=download) | 2.0.1 | OpenBSD arm | 5a732b15a4d36e61768388e323807e9a2fccb4bc | +| [gdrive-solaris-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnSEpJSDBidHNDNkE&export=download) | 2.0.1 | Solaris 64-bit | 82dc342873693b37302fc8f3cb97a667bae6e41c | +| [gdrive-plan9-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnZWMzR3pyZlZ0cFU&export=download) | 2.0.1 | Plan9 64-bit | e92e4a25517116c4fd466142baab03e0b9d11772 | +| [gdrive-plan9-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnOXV4d0dNeGZCR00&export=download) | 2.0.1 | Plan9 32-bit | efdfced751ca43995ad28dc1aa96b29a8c237433 | ## Compile from source ```bash From 496172358e780e984d125ed7d8867d287a03c293 Mon Sep 17 00:00:00 2001 From: Geoff Yoerger Date: Fri, 26 Feb 2016 22:06:00 -0600 Subject: [PATCH 159/195] Make `go get` work Make `go get` work --- README.md | 5 +---- compare.go | 2 +- gdrive.go | 2 +- handlers_drive.go | 6 +++--- handlers_meta.go | 2 +- 5 files changed, 7 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index e46f1395..f8d66fac 100644 --- a/README.md +++ b/README.md @@ -55,10 +55,7 @@ You will be prompted for a new verification code if the folder does not exist. ## Compile from source ```bash -git clone https://github.com/prasmussen/gdrive.git -cd gdrive -go get ./... 
-go build -o gdrive +go get github.com/prasmussen/gdrive ``` ## Gdrive 2 diff --git a/compare.go b/compare.go index 7dd8c86f..ac682849 100644 --- a/compare.go +++ b/compare.go @@ -1,7 +1,7 @@ package main import ( - "./drive" + "github.com/prasmussen/gdrive/drive" "encoding/json" "os" ) diff --git a/gdrive.go b/gdrive.go index c9a04f6a..c68d43dd 100644 --- a/gdrive.go +++ b/gdrive.go @@ -1,7 +1,7 @@ package main import ( - "./cli" + "github.com/prasmussen/gdrive/cli" "fmt" "os" ) diff --git a/handlers_drive.go b/handlers_drive.go index baaf050f..f0386ce5 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -1,9 +1,9 @@ package main import ( - "./auth" - "./cli" - "./drive" + "github.com/prasmussen/gdrive/auth" + "github.com/prasmussen/gdrive/cli" + "github.com/prasmussen/gdrive/drive" "fmt" "io" "io/ioutil" diff --git a/handlers_meta.go b/handlers_meta.go index 72e3dd0f..bfd5b8f3 100644 --- a/handlers_meta.go +++ b/handlers_meta.go @@ -1,7 +1,7 @@ package main import ( - "./cli" + "github.com/prasmussen/gdrive/cli" "fmt" "os" "runtime" From 401e017c5e821ba1dff9bc7e45e809b63c800192 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 27 Feb 2016 10:33:49 +0100 Subject: [PATCH 160/195] Add gdrive binary location --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index f8d66fac..37a097e7 100644 --- a/README.md +++ b/README.md @@ -57,6 +57,8 @@ You will be prompted for a new verification code if the folder does not exist. ```bash go get github.com/prasmussen/gdrive ``` +The gdrive binary should now be available at `$GOPATH/bin/gdrive` + ## Gdrive 2 Gdrive 2 is more or less a full rewrite and is not backwards compatible From 1cab8ce85ee62a08787167b1d0c68782f7249593 Mon Sep 17 00:00:00 2001 From: app-git-hub Date: Wed, 6 Apr 2016 11:56:18 +0530 Subject: [PATCH 161/195] Clarifies the phrase 'first time gdrive is launched'. Newbie users, like myself, compiled several versions of gdrive because we were not getting the auth. url when running `gdrive` from terminal as pointed out by the phrase "The first time gdrive is launched ... you will be prompted for a verification code". --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 37a097e7..88d87fc4 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,8 @@ If you want to compile from source you need the [go toolchain](http://golang.org ## Installation Download `gdrive` from one of the links below. On unix systems run `chmod +x gdrive` after download to make the binary executable. -The first time gdrive is launched, you will be prompted for a verification code. +The first time gdrive is launched (i.e. run `gdrive about` in your +terminal not just `gdrive`), you will be prompted for a verification code. The code is obtained by following the printed url and authenticating with the google account for the drive you want access to. This will create a token file inside the .gdrive folder in your home directory. 
Note that anyone with access From 0e1057e475525536dce2db4754e9d9840ab086f2 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 9 Apr 2016 18:04:39 +0200 Subject: [PATCH 162/195] Configurable timeout as argument #127 --- .gitignore | 1 + compare.go | 2 +- drive/download.go | 3 ++- drive/revision_download.go | 4 +++- drive/sync_download.go | 3 ++- drive/sync_upload.go | 5 +++-- drive/timeout_reader.go | 44 +++++++++++++++++++++++-------------- drive/update.go | 3 ++- drive/upload.go | 6 +++-- gdrive.go | 45 +++++++++++++++++++++++++++++++++++++- handlers_drive.go | 14 +++++++++++- handlers_meta.go | 2 +- 12 files changed, 104 insertions(+), 28 deletions(-) diff --git a/.gitignore b/.gitignore index ee3fac54..62df61af 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ _release/bin Session.vim .netrwhist gdrive +gdrive.sh diff --git a/compare.go b/compare.go index ac682849..35c03690 100644 --- a/compare.go +++ b/compare.go @@ -1,8 +1,8 @@ package main import ( - "github.com/prasmussen/gdrive/drive" "encoding/json" + "github.com/prasmussen/gdrive/drive" "os" ) diff --git a/drive/download.go b/drive/download.go index 0856258b..3cb83100 100644 --- a/drive/download.go +++ b/drive/download.go @@ -19,6 +19,7 @@ type DownloadArgs struct { Recursive bool Delete bool Stdout bool + Timeout time.Duration } func (self *Drive) Download(args DownloadArgs) error { @@ -120,7 +121,7 @@ func (self *Drive) downloadRecursive(args DownloadArgs) error { func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) (int64, int64, error) { // Get timeout reader wrapper and context - timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext(args.Timeout) res, err := self.service.Files.Get(f.Id).Context(ctx).Download() if err != nil { diff --git a/drive/revision_download.go b/drive/revision_download.go index 04055fae..57392a5d 100644 --- a/drive/revision_download.go +++ b/drive/revision_download.go @@ -5,6 +5,7 @@ import ( "io" "io/ioutil" "path/filepath" + "time" ) type DownloadRevisionArgs struct { @@ -15,6 +16,7 @@ type DownloadRevisionArgs struct { Path string Force bool Stdout bool + Timeout time.Duration } func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { @@ -30,7 +32,7 @@ func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { } // Get timeout reader wrapper and context - timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext(args.Timeout) res, err := getRev.Context(ctx).Download() if err != nil { diff --git a/drive/sync_download.go b/drive/sync_download.go index 04b50b95..2494557d 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -19,6 +19,7 @@ type DownloadSyncArgs struct { Path string DryRun bool DeleteExtraneous bool + Timeout time.Duration Resolution ConflictResolution Comparer FileComparer } @@ -188,7 +189,7 @@ func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, t } // Get timeout reader wrapper and context - timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext() + timeoutReaderWrapper, ctx := getTimeoutReaderWrapperContext(args.Timeout) res, err := self.service.Files.Get(id).Context(ctx).Download() if err != nil { diff --git a/drive/sync_upload.go b/drive/sync_upload.go index 0d5c2085..c509c0a6 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -20,6 +20,7 @@ type UploadSyncArgs struct { DryRun bool DeleteExtraneous bool ChunkSize int64 + 
Timeout time.Duration Resolution ConflictResolution Comparer FileComparer } @@ -308,7 +309,7 @@ func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args Upload progressReader := getProgressReader(srcFile, args.Progress, lf.info.Size()) // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) + reader, ctx := getTimeoutReaderContext(progressReader, args.Timeout) _, err = self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Context(ctx).Media(reader, chunkSize).Do() if err != nil { @@ -347,7 +348,7 @@ func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs, try i progressReader := getProgressReader(srcFile, args.Progress, cf.local.info.Size()) // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) + reader, ctx := getTimeoutReaderContext(progressReader, args.Timeout) _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Context(ctx).Media(reader, chunkSize).Do() if err != nil { diff --git a/drive/timeout_reader.go b/drive/timeout_reader.go index 9930c12c..67fd5b0e 100644 --- a/drive/timeout_reader.go +++ b/drive/timeout_reader.go @@ -7,39 +7,51 @@ import ( "time" ) -const MaxIdleTimeout = time.Second * 120 const TimeoutTimerInterval = time.Second * 10 type timeoutReaderWrapper func(io.Reader) io.Reader -func getTimeoutReaderWrapperContext() (timeoutReaderWrapper, context.Context) { +func getTimeoutReaderWrapperContext(timeout time.Duration) (timeoutReaderWrapper, context.Context) { ctx, cancel := context.WithCancel(context.TODO()) wrapper := func(r io.Reader) io.Reader { - return getTimeoutReader(r, cancel) + // Return untouched reader if timeout is 0 + if timeout == 0 { + return r + } + + return getTimeoutReader(r, cancel, timeout) } return wrapper, ctx } -func getTimeoutReaderContext(r io.Reader) (io.Reader, context.Context) { +func getTimeoutReaderContext(r io.Reader, timeout time.Duration) (io.Reader, context.Context) { ctx, cancel := context.WithCancel(context.TODO()) - return getTimeoutReader(r, cancel), ctx + + // Return untouched reader if timeout is 0 + if timeout == 0 { + return r, ctx + } + + return getTimeoutReader(r, cancel, timeout), ctx } -func getTimeoutReader(r io.Reader, cancel context.CancelFunc) io.Reader { +func getTimeoutReader(r io.Reader, cancel context.CancelFunc, timeout time.Duration) io.Reader { return &TimeoutReader{ - reader: r, - cancel: cancel, - mutex: &sync.Mutex{}, + reader: r, + cancel: cancel, + mutex: &sync.Mutex{}, + maxIdleTimeout: timeout, } } type TimeoutReader struct { - reader io.Reader - cancel context.CancelFunc - lastActivity time.Time - timer *time.Timer - mutex *sync.Mutex - done bool + reader io.Reader + cancel context.CancelFunc + lastActivity time.Time + timer *time.Timer + mutex *sync.Mutex + maxIdleTimeout time.Duration + done bool } func (self *TimeoutReader) Read(p []byte) (int, error) { @@ -90,7 +102,7 @@ func (self *TimeoutReader) timeout() { return } - if time.Since(self.lastActivity) > MaxIdleTimeout { + if time.Since(self.lastActivity) > self.maxIdleTimeout { self.cancel() self.mutex.Unlock() return diff --git a/drive/update.go b/drive/update.go index 156eb2f0..7af403ff 100644 --- a/drive/update.go +++ b/drive/update.go @@ -20,6 +20,7 @@ type UpdateArgs struct { Mime string Recursive bool ChunkSize int64 + Timeout time.Duration } func (self *Drive) Update(args UpdateArgs) error { @@ -57,7 +58,7 @@ func (self *Drive) Update(args UpdateArgs) error { progressReader := getProgressReader(srcFile, 
args.Progress, srcFileInfo.Size()) // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) + reader, ctx := getTimeoutReaderContext(progressReader, args.Timeout) fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) started := time.Now() diff --git a/drive/upload.go b/drive/upload.go index c42bebdc..6f5edd57 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -22,6 +22,7 @@ type UploadArgs struct { Share bool Delete bool ChunkSize int64 + Timeout time.Duration } func (self *Drive) Upload(args UploadArgs) error { @@ -173,7 +174,7 @@ func (self *Drive) uploadFile(args UploadArgs) (*drive.File, int64, error) { progressReader := getProgressReader(srcFile, args.Progress, srcFileInfo.Size()) // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) + reader, ctx := getTimeoutReaderContext(progressReader, args.Timeout) fmt.Fprintf(args.Out, "Uploading %s\n", args.Path) started := time.Now() @@ -198,6 +199,7 @@ type UploadStreamArgs struct { Share bool ChunkSize int64 Progress io.Writer + Timeout time.Duration } func (self *Drive) UploadStream(args UploadStreamArgs) error { @@ -223,7 +225,7 @@ func (self *Drive) UploadStream(args UploadStreamArgs) error { progressReader := getProgressReader(args.In, args.Progress, 0) // Wrap reader in timeout reader - reader, ctx := getTimeoutReaderContext(progressReader) + reader, ctx := getTimeoutReaderContext(progressReader, args.Timeout) fmt.Fprintf(args.Out, "Uploading %s\n", dstFile.Name) started := time.Now() diff --git a/gdrive.go b/gdrive.go index c68d43dd..e7d28cf7 100644 --- a/gdrive.go +++ b/gdrive.go @@ -1,8 +1,8 @@ package main import ( - "github.com/prasmussen/gdrive/cli" "fmt" + "github.com/prasmussen/gdrive/cli" "os" ) @@ -14,6 +14,7 @@ const DefaultMaxChanges = 100 const DefaultNameWidth = 40 const DefaultPathWidth = 60 const DefaultUploadChunkSize = 8 * 1024 * 1024 +const DefaultTimeout = 5 * 60 const DefaultQuery = "trashed = false and 'me' in owners" const DefaultShareRole = "reader" const DefaultShareType = "anyone" @@ -134,6 +135,12 @@ func main() { Description: "Write file content to stdout", OmitValue: true, }, + cli.IntFlag{ + Name: "timeout", + Patterns: []string{"--timeout"}, + Description: fmt.Sprintf("Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: %d", DefaultTimeout), + DefaultValue: DefaultTimeout, + }, ), }, }, @@ -216,6 +223,12 @@ func main() { Description: "Delete local file when upload is successful", OmitValue: true, }, + cli.IntFlag{ + Name: "timeout", + Patterns: []string{"--timeout"}, + Description: fmt.Sprintf("Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: %d", DefaultTimeout), + DefaultValue: DefaultTimeout, + }, cli.IntFlag{ Name: "chunksize", Patterns: []string{"--chunksize"}, @@ -254,6 +267,12 @@ func main() { Description: "Share file", OmitValue: true, }, + cli.IntFlag{ + Name: "timeout", + Patterns: []string{"--timeout"}, + Description: fmt.Sprintf("Set timeout in seconds, use 0 for no timeout. 
Timeout is reached when no data is transferred in set amount of seconds, default: %d", DefaultTimeout), + DefaultValue: DefaultTimeout, + }, cli.BoolFlag{ Name: "noProgress", Patterns: []string{"--no-progress"}, @@ -291,6 +310,12 @@ func main() { Patterns: []string{"--mime"}, Description: "Force mime type", }, + cli.IntFlag{ + Name: "timeout", + Patterns: []string{"--timeout"}, + Description: fmt.Sprintf("Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: %d", DefaultTimeout), + DefaultValue: DefaultTimeout, + }, cli.IntFlag{ Name: "chunksize", Patterns: []string{"--chunksize"}, @@ -494,6 +519,12 @@ func main() { Description: "Hide progress", OmitValue: true, }, + cli.IntFlag{ + Name: "timeout", + Patterns: []string{"--timeout"}, + Description: fmt.Sprintf("Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: %d", DefaultTimeout), + DefaultValue: DefaultTimeout, + }, ), }, }, @@ -540,6 +571,12 @@ func main() { Description: "Hide progress", OmitValue: true, }, + cli.IntFlag{ + Name: "timeout", + Patterns: []string{"--timeout"}, + Description: fmt.Sprintf("Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: %d", DefaultTimeout), + DefaultValue: DefaultTimeout, + }, cli.IntFlag{ Name: "chunksize", Patterns: []string{"--chunksize"}, @@ -647,6 +684,12 @@ func main() { Patterns: []string{"--path"}, Description: "Download path", }, + cli.IntFlag{ + Name: "timeout", + Patterns: []string{"--timeout"}, + Description: fmt.Sprintf("Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: %d", DefaultTimeout), + DefaultValue: DefaultTimeout, + }, ), }, }, diff --git a/handlers_drive.go b/handlers_drive.go index f0386ce5..8db7329b 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -1,15 +1,16 @@ package main import ( + "fmt" "github.com/prasmussen/gdrive/auth" "github.com/prasmussen/gdrive/cli" "github.com/prasmussen/gdrive/drive" - "fmt" "io" "io/ioutil" "net/http" "os" "path/filepath" + "time" ) const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com" @@ -57,6 +58,7 @@ func downloadHandler(ctx cli.Context) { Recursive: args.Bool("recursive"), Stdout: args.Bool("stdout"), Progress: progressWriter(args.Bool("noProgress")), + Timeout: durationInSeconds(args.Int64("timeout")), }) checkErr(err) } @@ -84,6 +86,7 @@ func downloadSyncHandler(ctx cli.Context) { RootId: args.String("fileId"), DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), + Timeout: durationInSeconds(args.Int64("timeout")), Resolution: conflictResolution(args), Comparer: NewCachedMd5Comparer(cachePath), }) @@ -100,6 +103,7 @@ func downloadRevisionHandler(ctx cli.Context) { Stdout: args.Bool("stdout"), Path: args.String("path"), Progress: progressWriter(args.Bool("noProgress")), + Timeout: durationInSeconds(args.Int64("timeout")), }) checkErr(err) } @@ -118,6 +122,7 @@ func uploadHandler(ctx cli.Context) { Share: args.Bool("share"), Delete: args.Bool("delete"), ChunkSize: args.Int64("chunksize"), + Timeout: durationInSeconds(args.Int64("timeout")), }) checkErr(err) } @@ -132,6 +137,7 @@ func uploadStdinHandler(ctx cli.Context) { Mime: args.String("mime"), Share: args.Bool("share"), ChunkSize: args.Int64("chunksize"), + Timeout: durationInSeconds(args.Int64("timeout")), Progress: 
progressWriter(args.Bool("noProgress")), }) checkErr(err) @@ -148,6 +154,7 @@ func uploadSyncHandler(ctx cli.Context) { DryRun: args.Bool("dryRun"), DeleteExtraneous: args.Bool("deleteExtraneous"), ChunkSize: args.Int64("chunksize"), + Timeout: durationInSeconds(args.Int64("timeout")), Resolution: conflictResolution(args), Comparer: NewCachedMd5Comparer(cachePath), }) @@ -165,6 +172,7 @@ func updateHandler(ctx cli.Context) { Mime: args.String("mime"), Progress: progressWriter(args.Bool("noProgress")), ChunkSize: args.Int64("chunksize"), + Timeout: durationInSeconds(args.Int64("timeout")), }) checkErr(err) } @@ -385,6 +393,10 @@ func progressWriter(discard bool) io.Writer { return os.Stderr } +func durationInSeconds(seconds int64) time.Duration { + return time.Second * time.Duration(seconds) +} + func conflictResolution(args cli.Arguments) drive.ConflictResolution { keepLocal := args.Bool("keepLocal") keepRemote := args.Bool("keepRemote") diff --git a/handlers_meta.go b/handlers_meta.go index bfd5b8f3..2e1c95cc 100644 --- a/handlers_meta.go +++ b/handlers_meta.go @@ -1,8 +1,8 @@ package main import ( - "github.com/prasmussen/gdrive/cli" "fmt" + "github.com/prasmussen/gdrive/cli" "os" "runtime" "strings" From 28c5b9beb7b16dc0e1a39fc61163868dcbc02d19 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 9 Apr 2016 18:28:04 +0200 Subject: [PATCH 163/195] Give proper timeout error message --- drive/download.go | 3 +++ drive/errors.go | 5 +++++ drive/revision_download.go | 3 +++ drive/sync_download.go | 2 ++ drive/sync_upload.go | 4 ++++ drive/update.go | 3 +++ drive/upload.go | 6 ++++++ 7 files changed, 26 insertions(+) diff --git a/drive/download.go b/drive/download.go index 3cb83100..ec1af8a3 100644 --- a/drive/download.go +++ b/drive/download.go @@ -125,6 +125,9 @@ func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) (int64, int6 res, err := self.service.Files.Get(f.Id).Context(ctx).Download() if err != nil { + if isTimeoutError(err) { + return 0, 0, fmt.Errorf("Failed to download file: timeout, no data was transferred for %v", args.Timeout) + } return 0, 0, fmt.Errorf("Failed to download file: %s", err) } diff --git a/drive/errors.go b/drive/errors.go index e7631f77..465d818a 100644 --- a/drive/errors.go +++ b/drive/errors.go @@ -1,6 +1,7 @@ package drive import ( + "golang.org/x/net/context" "google.golang.org/api/googleapi" "time" ) @@ -16,6 +17,10 @@ func isBackendError(err error) bool { return ok && ae.Code >= 500 && ae.Code <= 599 } +func isTimeoutError(err error) bool { + return err == context.Canceled +} + func exponentialBackoffSleep(try int) { seconds := pow(2, try) time.Sleep(time.Duration(seconds) * time.Second) diff --git a/drive/revision_download.go b/drive/revision_download.go index 57392a5d..6bed8862 100644 --- a/drive/revision_download.go +++ b/drive/revision_download.go @@ -36,6 +36,9 @@ func (self *Drive) DownloadRevision(args DownloadRevisionArgs) (err error) { res, err := getRev.Context(ctx).Download() if err != nil { + if isTimeoutError(err) { + return fmt.Errorf("Failed to download file: timeout, no data was transferred for %v", args.Timeout) + } return fmt.Errorf("Failed to download file: %s", err) } diff --git a/drive/sync_download.go b/drive/sync_download.go index 2494557d..10dfd160 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -197,6 +197,8 @@ func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, t exponentialBackoffSleep(try) try++ return self.downloadRemoteFile(id, fpath, args, try) + } else if 
isTimeoutError(err) { + return fmt.Errorf("Failed to download file: timeout, no data was transferred for %v", args.Timeout) } else { return fmt.Errorf("Failed to download file: %s", err) } diff --git a/drive/sync_upload.go b/drive/sync_upload.go index c509c0a6..bb1ab337 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -317,6 +317,8 @@ func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args Upload exponentialBackoffSleep(try) try++ return self.uploadMissingFile(parentId, lf, args, try) + } else if isTimeoutError(err) { + return fmt.Errorf("Failed to upload file: timeout, no data was transferred for %v", args.Timeout) } else { return fmt.Errorf("Failed to upload file: %s", err) } @@ -356,6 +358,8 @@ func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs, try i exponentialBackoffSleep(try) try++ return self.updateChangedFile(cf, args, try) + } else if isTimeoutError(err) { + return fmt.Errorf("Failed to upload file: timeout, no data was transferred for %v", args.Timeout) } else { return fmt.Errorf("Failed to update file: %s", err) } diff --git a/drive/update.go b/drive/update.go index 7af403ff..2ab684e9 100644 --- a/drive/update.go +++ b/drive/update.go @@ -65,6 +65,9 @@ func (self *Drive) Update(args UpdateArgs) error { f, err := self.service.Files.Update(args.Id, dstFile).Fields("id", "name", "size").Context(ctx).Media(reader, chunkSize).Do() if err != nil { + if isTimeoutError(err) { + return fmt.Errorf("Failed to upload file: timeout, no data was transferred for %v", args.Timeout) + } return fmt.Errorf("Failed to upload file: %s", err) } diff --git a/drive/upload.go b/drive/upload.go index 6f5edd57..a344b625 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -181,6 +181,9 @@ func (self *Drive) uploadFile(args UploadArgs) (*drive.File, int64, error) { f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum", "webContentLink").Context(ctx).Media(reader, chunkSize).Do() if err != nil { + if isTimeoutError(err) { + return nil, 0, fmt.Errorf("Failed to upload file: timeout, no data was transferred for %v", args.Timeout) + } return nil, 0, fmt.Errorf("Failed to upload file: %s", err) } @@ -232,6 +235,9 @@ func (self *Drive) UploadStream(args UploadStreamArgs) error { f, err := self.service.Files.Create(dstFile).Fields("id", "name", "size", "webContentLink").Context(ctx).Media(reader, chunkSize).Do() if err != nil { + if isTimeoutError(err) { + return fmt.Errorf("Failed to upload file: timeout, no data was transferred for %v", args.Timeout) + } return fmt.Errorf("Failed to upload file: %s", err) } From 17b8c6511f776bb37ce31f8f64f7cc2f1a3640fb Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 9 Apr 2016 18:36:45 +0200 Subject: [PATCH 164/195] Add minimum go 1.5 version notice #131 --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 37a097e7..c1db064b 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ gdrive is a command line utility for interacting with Google Drive. ## Prerequisites None, binaries are statically linked. If you want to compile from source you need the [go toolchain](http://golang.org/doc/install). +Version 1.5 or higher. ## Installation Download `gdrive` from one of the links below. 
On unix systems From bdd7877be9c503e7968c221f3396d239edb267d2 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 9 Apr 2016 18:54:10 +0200 Subject: [PATCH 165/195] Check both backend and rate limit errors --- drive/errors.go | 15 ++++++++++++++- drive/sync_download.go | 4 ++-- drive/sync_upload.go | 8 ++++---- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/drive/errors.go b/drive/errors.go index 465d818a..f70f0f01 100644 --- a/drive/errors.go +++ b/drive/errors.go @@ -6,7 +6,11 @@ import ( "time" ) -const MaxBackendErrorRetries = 5 +const MaxErrorRetries = 5 + +func isBackendOrRateLimitError(err error) bool { + return isBackendError(err) || isRateLimitError(err) +} func isBackendError(err error) bool { if err == nil { @@ -17,6 +21,15 @@ func isBackendError(err error) bool { return ok && ae.Code >= 500 && ae.Code <= 599 } +func isRateLimitError(err error) bool { + if err == nil { + return false + } + + ae, ok := err.(*googleapi.Error) + return ok && ae.Code == 403 +} + func isTimeoutError(err error) bool { return err == context.Canceled } diff --git a/drive/sync_download.go b/drive/sync_download.go index 10dfd160..58cd49c6 100644 --- a/drive/sync_download.go +++ b/drive/sync_download.go @@ -193,7 +193,7 @@ func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, t res, err := self.service.Files.Get(id).Context(ctx).Download() if err != nil { - if isBackendError(err) && try < MaxBackendErrorRetries { + if isBackendOrRateLimitError(err) && try < MaxErrorRetries { exponentialBackoffSleep(try) try++ return self.downloadRemoteFile(id, fpath, args, try) @@ -231,7 +231,7 @@ func (self *Drive) downloadRemoteFile(id, fpath string, args DownloadSyncArgs, t _, err = io.Copy(outFile, reader) if err != nil { outFile.Close() - if try < MaxBackendErrorRetries { + if try < MaxErrorRetries { exponentialBackoffSleep(try) try++ return self.downloadRemoteFile(id, fpath, args, try) diff --git a/drive/sync_upload.go b/drive/sync_upload.go index bb1ab337..261497d1 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -270,7 +270,7 @@ func (self *Drive) createMissingRemoteDir(args createMissingRemoteDirArgs) (*dri f, err := self.service.Files.Create(dstFile).Do() if err != nil { - if isBackendError(err) && args.try < MaxBackendErrorRetries { + if isBackendOrRateLimitError(err) && args.try < MaxErrorRetries { exponentialBackoffSleep(args.try) args.try++ return self.createMissingRemoteDir(args) @@ -313,7 +313,7 @@ func (self *Drive) uploadMissingFile(parentId string, lf *LocalFile, args Upload _, err = self.service.Files.Create(dstFile).Fields("id", "name", "size", "md5Checksum").Context(ctx).Media(reader, chunkSize).Do() if err != nil { - if isBackendError(err) && try < MaxBackendErrorRetries { + if isBackendOrRateLimitError(err) && try < MaxErrorRetries { exponentialBackoffSleep(try) try++ return self.uploadMissingFile(parentId, lf, args, try) @@ -354,7 +354,7 @@ func (self *Drive) updateChangedFile(cf *changedFile, args UploadSyncArgs, try i _, err = self.service.Files.Update(cf.remote.file.Id, dstFile).Context(ctx).Media(reader, chunkSize).Do() if err != nil { - if isBackendError(err) && try < MaxBackendErrorRetries { + if isBackendOrRateLimitError(err) && try < MaxErrorRetries { exponentialBackoffSleep(try) try++ return self.updateChangedFile(cf, args, try) @@ -375,7 +375,7 @@ func (self *Drive) deleteRemoteFile(rf *RemoteFile, args UploadSyncArgs, try int err := self.service.Files.Delete(rf.file.Id).Do() if err != nil { - if isBackendError(err) && try < 
MaxBackendErrorRetries { + if isBackendOrRateLimitError(err) && try < MaxErrorRetries { exponentialBackoffSleep(try) try++ return self.deleteRemoteFile(rf, args, try) From bc5f4534ffe1f1e34ede7c496b3e12865ed2fe97 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 9 Apr 2016 19:24:34 +0200 Subject: [PATCH 166/195] Skip non-regular files #114 --- drive/upload.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drive/upload.go b/drive/upload.go index a344b625..dbf068ca 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -90,10 +90,12 @@ func (self *Drive) uploadRecursive(args UploadArgs) error { if info.IsDir() { args.Name = "" return self.uploadDirectory(args) - } else { + } else if info.Mode().IsRegular() { _, _, err := self.uploadFile(args) return err } + + return nil } func (self *Drive) uploadDirectory(args UploadArgs) error { From ee1368b2255e2ddb42e4199ff48494dc38e72a0d Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 9 Apr 2016 19:25:11 +0200 Subject: [PATCH 167/195] Version 2.1.0 --- gdrive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gdrive.go b/gdrive.go index e7d28cf7..ecb69503 100644 --- a/gdrive.go +++ b/gdrive.go @@ -7,7 +7,7 @@ import ( ) const Name = "gdrive" -const Version = "2.0.1" +const Version = "2.1.0" const DefaultMaxFiles = 30 const DefaultMaxChanges = 100 From 264268dbd4f3ddc4b8ed6a97eba34cea1494ae1c Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 9 Apr 2016 21:54:45 +0200 Subject: [PATCH 168/195] Update usage --- README.md | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index c1db064b..72f49c98 100644 --- a/README.md +++ b/README.md @@ -147,12 +147,13 @@ global: --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) options: - -f, --force Overwrite existing file - -r, --recursive Download directory recursively, documents will be skipped - --path Download path - --delete Delete remote file when download is successful - --no-progress Hide progress - --stdout Write file content to stdout + -f, --force Overwrite existing file + -r, --recursive Download directory recursively, documents will be skipped + --path Download path + --delete Delete remote file when download is successful + --no-progress Hide progress + --stdout Write file content to stdout + --timeout Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: 300 ``` #### Download all files and directories matching query @@ -188,6 +189,7 @@ options: --mime Force mime type --share Share file --delete Delete local file when upload is successful + --timeout Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: 300 --chunksize Set chunk size in bytes, default: 8388608 ``` @@ -205,6 +207,7 @@ options: --chunksize Set chunk size in bytes, default: 8388608 --mime Force mime type --share Share file + --timeout Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: 300 --no-progress Hide progress ``` @@ -222,6 +225,7 @@ options: --name Filename --no-progress Hide progress --mime Force mime type + --timeout Set timeout in seconds, use 0 for no timeout. 
Timeout is reached when no data is transferred in set amount of seconds, default: 300 --chunksize Set chunk size in bytes, default: 8388608 ``` @@ -346,6 +350,7 @@ options: --delete-extraneous Delete extraneous local files --dry-run Show what would have been transferred --no-progress Hide progress + --timeout Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: 300 ``` #### Sync local directory to drive @@ -364,6 +369,7 @@ options: --delete-extraneous Delete extraneous remote files --dry-run Show what would have been transferred --no-progress Hide progress + --timeout Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: 300 --chunksize Set chunk size in bytes, default: 8388608 ``` @@ -409,10 +415,11 @@ global: --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) options: - -f, --force Overwrite existing file - --no-progress Hide progress - --stdout Write file content to stdout - --path Download path + -f, --force Overwrite existing file + --no-progress Hide progress + --stdout Write file content to stdout + --path Download path + --timeout Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: 300 ``` #### Delete file revision From 5b72ec986f53ac0316424de89bdbff4bfb8d61e3 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 9 Apr 2016 22:00:31 +0200 Subject: [PATCH 169/195] Add 2.1.0 download links --- README.md | 54 +++++++++++++++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 72f49c98..42bd9a93 100644 --- a/README.md +++ b/README.md @@ -26,33 +26,33 @@ You will be prompted for a new verification code if the folder does not exist. 
### Downloads | Filename | Version | Description | Shasum | |:-----------------------|:--------|:-------------------|:-----------------------------------------| -| [gdrive-osx-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnZUM0bXRBeUpYQ00&export=download) | 2.0.1 | OS X 64-bit | 180bc98408c7ec6deac6a66bbd9c307c4348ae6f | -| [gdrive-osx-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnM05hQXhvWm9EOHM&export=download) | 2.0.1 | OS X 32-bit | 366ee217d4327a1855245d8c4a1204f4831eb979 | -| [gdrive-osx-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnelYxRU5LbEVfVzQ&export=download) | 2.0.1 | OS X arm | cdc31f83e50560a7f8fbf8a25b9c87c945d1407f | -| [gdrive-linux-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnWksyTEtCM0VfaFE&export=download) | 2.0.1 | Linux 64-bit | c636778c4a2c76e47ac731c142f4219a19c30263 | -| [gdrive-linux-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnbndOUW50ZVllZ3M&export=download) | 2.0.1 | Linux 32-bit | 0968993e4a70a594e0f315034640fd811977e4f1 | -| [gdrive-linux-rpi](https://docs.google.com/uc?id=0B3X9GlR6EmbnbVRIelM5SG5zcmM&export=download) | 2.0.1 | Linux Raspberry Pi | 7865a1e96e70791aa0f33cb758e6cda7886be240 | -| [gdrive-linux-arm64](https://docs.google.com/uc?id=0B3X9GlR6EmbnNkxrQ3VzOGNMNWc&export=download) | 2.0.1 | Linux arm 64-bit | 00b293e4501da64e956d32b7c1589a014b541abe | -| [gdrive-linux-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnLW51cVJrazVLOG8&export=download) | 2.0.1 | Linux arm 32-bit | 3577a6462dafc47823d5ed053a978af84a99c5af | -| [gdrive-linux-mips64](https://docs.google.com/uc?id=0B3X9GlR6EmbnTS1fejFtaDMyLU0&export=download) | 2.0.1 | Linux mips 64-bit | e1e5992a5635467b84f149435544d980e99f30c6 | -| [gdrive-linux-mips64le](https://docs.google.com/uc?id=0B3X9GlR6EmbnV3pJU0ZBTEg0a2M&export=download) | 2.0.1 | Linux mips 64-bit le | 8273d53fc21b6028de781958c6f224f41c0a92db | -| [gdrive-linux-ppc64](https://docs.google.com/uc?id=0B3X9GlR6EmbnOXNPM01ITHl3NUk&export=download) | 2.0.1 | Linux PPC 64-bit | fc1409b9960ae4a209331e40a4cd9b10a789f8e6 | -| [gdrive-linux-ppc64le](https://docs.google.com/uc?id=0B3X9GlR6EmbnRVBCRlpXNl95bW8&export=download) | 2.0.1 | Linux PPC 64-bit le | 7a09ed4b43c31198efdf4e4a7da1e196cd3cd54f | -| [gdrive-windows-386.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbncnpncGpZX0pULUU&export=download) | 2.0.1 | Window 32-bit | 134480ca113d03b91dbfa43326704b57f07ca547 | -| [gdrive-windows-x64.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbncWNLOS1KYWhLVFE&export=download) | 2.0.1 | Windows 64-bit | c68df6e77aa7fa412bfe318ab270e1245a24966b | -| [gdrive-dragonfly-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnX1FiZzFaN1hRekk&export=download) | 2.0.1 | DragonFly BSD 64-bit | 0116d291a859152a4af842254eb88102c18f9ad6 | -| [gdrive-freebsd-x64](https://docs.google.com/uc?id=0B3X9GlR6Embnck8wR0ozV3J2WHM&export=download) | 2.0.1 | FreeBSD 64-bit | 0ab7d8509efcbbf41a4c684f9942c749706ea8ab | -| [gdrive-freebsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnVUtwX010YUlENWs&export=download) | 2.0.1 | FreeBSD 32-bit | 4e1ac2aeeed59df1d8636641b796aad54ade42d2 | -| [gdrive-freebsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbncXV3SU9NZDc3X3M&export=download) | 2.0.1 | FreeBSD arm | 1220f7f75579b28205d302f1e8ad0faeefc5d188 | -| [gdrive-netbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnUk5JMVFiS0pKZW8&export=download) | 2.0.1 | NetBSD 64-bit | dba9e9f57b6ed3c370961e95efaea1ed0ea4429f | -| [gdrive-netbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnLWZRTy1LX1kxUkE&export=download) | 2.0.1 | NetBSD 32-bit | 
8303ec4f0f7ba2acecc4509e0e73f8bf1eb2cd68 | -| [gdrive-netbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnQUNFOWxBYmdjUEk&export=download) | 2.0.1 | NetBSD arm | 84aeb602e0cfbb09a35fff97f2af5990673f9bee | -| [gdrive-openbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnYk50VlVuNUhuaVE&export=download) | 2.0.1 | OpenBSD 64-bit | 729794ef7cfc320cab84fd78e3113f5fdd476ac6 | -| [gdrive-openbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnOExCLVNYakJiSUk&export=download) | 2.0.1 | OpenBSD 32-bit | a1f45252d86ae238ce17b49c226b218aec805a02 | -| [gdrive-openbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnN1htLTBSSElRUDQ&export=download) | 2.0.1 | OpenBSD arm | 5a732b15a4d36e61768388e323807e9a2fccb4bc | -| [gdrive-solaris-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnSEpJSDBidHNDNkE&export=download) | 2.0.1 | Solaris 64-bit | 82dc342873693b37302fc8f3cb97a667bae6e41c | -| [gdrive-plan9-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnZWMzR3pyZlZ0cFU&export=download) | 2.0.1 | Plan9 64-bit | e92e4a25517116c4fd466142baab03e0b9d11772 | -| [gdrive-plan9-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnOXV4d0dNeGZCR00&export=download) | 2.0.1 | Plan9 32-bit | efdfced751ca43995ad28dc1aa96b29a8c237433 | +| [gdrive-osx-x64](https://docs.google.com/uc?id=0B3X9GlR6Embnb010SnpUV0s2ZkU&export=download) | 2.1.0 | OS X 64-bit | 297ccf3c945b364b5d306cef335ba44b0900e927 | +| [gdrive-osx-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnTjByNlNvZVNRTjQ&export=download) | 2.1.0 | OS X 32-bit | c64714676a5b028aeeaf09e5f3b84d363e0ec7ed | +| [gdrive-osx-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnbURvYnVyVmNNX2M&export=download) | 2.1.0 | OS X arm | eb23b7bb5a072497372bd253e8fc8353bec8a64c | +| [gdrive-linux-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnQ0FtZmJJUXEyRTA&export=download) | 2.1.0 | Linux 64-bit | 4fd8391b300cac45963e53da44dcfe68da08d843 | +| [gdrive-linux-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnLV92dHBpTkFhTEU&export=download) | 2.1.0 | Linux 32-bit | de9f49565fc62552fe862f08f84694ab4653adc2 | +| [gdrive-linux-rpi](https://docs.google.com/uc?id=0B3X9GlR6EmbnVXNLanp4ZFRRbzg&export=download) | 2.1.0 | Linux Raspberry Pi | e26e9ca3df3d08f970a276782ac5e92731c85467 | +| [gdrive-linux-arm64](https://docs.google.com/uc?id=0B3X9GlR6EmbnamliN0Rld01oRVk&export=download) | 2.1.0 | Linux arm 64-bit | 3d670905e13edf96d43c9f97293bdba62c740926 | +| [gdrive-linux-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnRjBaMVVLalN4cTA&export=download) | 2.1.0 | Linux arm 32-bit | 5b1036e0ef479ce228f7c32d1adfdc3840d71d10 | +| [gdrive-linux-mips64](https://docs.google.com/uc?id=0B3X9GlR6Embna2lzdEJ6blFzSzQ&export=download) | 2.1.0 | Linux mips 64-bit | 334bbd74b87fd1d05550e366724fe8e3c9e61ca4 | +| [gdrive-linux-mips64le](https://docs.google.com/uc?id=0B3X9GlR6EmbnWFk4Q3ZVZ1g3ZHM&export=download) | 2.1.0 | Linux mips 64-bit le | bb6961a2c03c074e6d34a1ec280cc69f5d5002f5 | +| [gdrive-linux-ppc64](https://docs.google.com/uc?id=0B3X9GlR6EmbnS09XMzhfRXBnUzA&export=download) | 2.1.0 | Linux PPC 64-bit | 70a1ac5be9ba819da5cf7a8dbd513805a26509ac | +| [gdrive-linux-ppc64le](https://docs.google.com/uc?id=0B3X9GlR6EmbneDJ2b3hqbVlNZnc&export=download) | 2.1.0 | Linux PPC 64-bit le | f426817ee4824b83b978f82f8e72eac6db92f2d1 | +| [gdrive-windows-386.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbnV3RNeFVUQjZvS2c&export=download) | 2.1.0 | Window 32-bit | 1429200631b598543eddc3df3487117cad95adbb | +| [gdrive-windows-x64.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbnNFRSSW1GaFBSRk0&export=download) | 2.1.0 | Windows 64-bit | 
16ccab7c66b144e5806daeb2ba50d567b51504ca | +| [gdrive-dragonfly-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnelNIdmRMMGpVa2s&export=download) | 2.1.0 | DragonFly BSD 64-bit | dc214a24e59f68d99ca62757d99099051f83804a | +| [gdrive-freebsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnQkN0NnUwZ0tKLXM&export=download) | 2.1.0 | FreeBSD 64-bit | 93a5581652f9c01c47fb6c16e8ae655182f265da | +| [gdrive-freebsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnNU5rbXBzeEhhOTA&export=download) | 2.1.0 | FreeBSD 32-bit | b9a3ee1e0fdbb5fa970942ab89b354ee863a5758 | +| [gdrive-freebsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnVHpUbVFzZzNqeW8&export=download) | 2.1.0 | FreeBSD arm | 7f5d1dedaa98501932ea368f2baba240da0b00d8 | +| [gdrive-netbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnbGJobnBQR0dtV2c&export=download) | 2.1.0 | NetBSD 64-bit | 2a088dbd1e149204eb71a47ade109816983fe53f | +| [gdrive-netbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbneWszMnl5RGZnYWs&export=download) | 2.1.0 | NetBSD 32-bit | a2c231b91839171a58da780657c445d4a1430537 | +| [gdrive-netbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnVVhOWG9UUUhWNVU&export=download) | 2.1.0 | NetBSD arm | ac8a6354f8a8346c2bf84585e14f4a2cc69451db | +| [gdrive-openbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnSy1JdFlHdUYyaGs&export=download) | 2.1.0 | OpenBSD 64-bit | 54be1d38b9014c6a8de5d71233cd6f208c27ac1c | +| [gdrive-openbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnRWhIZFRCNE1OdWc&export=download) | 2.1.0 | OpenBSD 32-bit | c2e08a9c7242de6d6ffa01598425fea0550076b8 | +| [gdrive-openbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnWnAzMTNZanp2UEE&export=download) | 2.1.0 | OpenBSD arm | 22cd413c2705012b2ac78e64cc9f2b5bfa96dbea | +| [gdrive-solaris-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnWEtENmQ5dDJtTHc&export=download) | 2.1.0 | Solaris 64-bit | 2da03dfcc818a0bd3588ad850349a5a2554913fb | +| [gdrive-plan9-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnRmVyelhxLUUySjA&export=download) | 2.1.0 | Plan9 64-bit | 7b498ce0f416a3e8c1e17f603d21a3e84c1a9283 | +| [gdrive-plan9-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnckdHZVdRZ0dZTU0&export=download) | 2.1.0 | Plan9 32-bit | cccd9ba86774bc6bd70f092158e2fcafa94601c0 | ## Compile from source ```bash From b5eb2866cfceb69b0d4dd4948273d679a884fbb2 Mon Sep 17 00:00:00 2001 From: Paul Zabelin Date: Sun, 17 Apr 2016 03:22:31 -0700 Subject: [PATCH 170/195] add Go dependencies by godep see https://github.com/tools/godep --- Godeps/Godeps.json | 48 + Godeps/Readme | 5 + .../sabhiram/go-git-ignore/.gitignore | 28 + .../sabhiram/go-git-ignore/.travis.yml | 18 + .../github.com/sabhiram/go-git-ignore/LICENSE | 22 + .../sabhiram/go-git-ignore/README.md | 17 + .../sabhiram/go-git-ignore/ignore.go | 200 + vendor/github.com/soniakeys/graph/.gitignore | 2 + vendor/github.com/soniakeys/graph/.travis.yml | 8 + vendor/github.com/soniakeys/graph/adj.go | 325 + vendor/github.com/soniakeys/graph/adj_RO.go | 387 + vendor/github.com/soniakeys/graph/adj_cg.go | 387 + vendor/github.com/soniakeys/graph/bits.go | 207 + vendor/github.com/soniakeys/graph/bits32.go | 23 + vendor/github.com/soniakeys/graph/bits64.go | 22 + vendor/github.com/soniakeys/graph/dir.go | 538 ++ vendor/github.com/soniakeys/graph/dir_RO.go | 395 + vendor/github.com/soniakeys/graph/dir_cg.go | 395 + vendor/github.com/soniakeys/graph/doc.go | 128 + vendor/github.com/soniakeys/graph/fromlist.go | 418 ++ vendor/github.com/soniakeys/graph/graph.go | 181 + vendor/github.com/soniakeys/graph/hacking.md | 37 + 
vendor/github.com/soniakeys/graph/mst.go | 244 + vendor/github.com/soniakeys/graph/random.go | 325 + vendor/github.com/soniakeys/graph/readme.md | 38 + vendor/github.com/soniakeys/graph/sssp.go | 881 +++ vendor/github.com/soniakeys/graph/travis.sh | 11 + vendor/github.com/soniakeys/graph/undir.go | 321 + vendor/github.com/soniakeys/graph/undir_RO.go | 659 ++ vendor/github.com/soniakeys/graph/undir_cg.go | 659 ++ vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + vendor/golang.org/x/net/context/context.go | 156 + .../x/net/context/ctxhttp/cancelreq.go | 19 + .../x/net/context/ctxhttp/cancelreq_go14.go | 23 + .../x/net/context/ctxhttp/ctxhttp.go | 145 + vendor/golang.org/x/net/context/go17.go | 72 + vendor/golang.org/x/net/context/pre_go17.go | 300 + vendor/golang.org/x/oauth2/.travis.yml | 14 + vendor/golang.org/x/oauth2/AUTHORS | 3 + vendor/golang.org/x/oauth2/CONTRIBUTING.md | 31 + vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 + vendor/golang.org/x/oauth2/LICENSE | 27 + vendor/golang.org/x/oauth2/README.md | 64 + .../golang.org/x/oauth2/client_appengine.go | 25 + vendor/golang.org/x/oauth2/internal/oauth2.go | 76 + vendor/golang.org/x/oauth2/internal/token.go | 225 + .../golang.org/x/oauth2/internal/transport.go | 69 + vendor/golang.org/x/oauth2/oauth2.go | 337 + vendor/golang.org/x/oauth2/token.go | 158 + vendor/golang.org/x/oauth2/transport.go | 132 + vendor/google.golang.org/api/LICENSE | 27 + .../api/drive/v3/drive-api.json | 2410 ++++++ .../api/drive/v3/drive-gen.go | 6434 +++++++++++++++++ .../api/gensupport/backoff.go | 46 + .../api/gensupport/buffer.go | 77 + .../google.golang.org/api/gensupport/doc.go | 10 + .../google.golang.org/api/gensupport/json.go | 172 + .../google.golang.org/api/gensupport/media.go | 200 + .../api/gensupport/params.go | 50 + .../api/gensupport/resumable.go | 198 + .../google.golang.org/api/gensupport/retry.go | 77 + .../api/googleapi/googleapi.go | 432 ++ .../googleapi/internal/uritemplates/LICENSE | 18 + .../internal/uritemplates/uritemplates.go | 220 + .../googleapi/internal/uritemplates/utils.go | 13 + .../google.golang.org/api/googleapi/types.go | 182 + 67 files changed, 19423 insertions(+) create mode 100644 Godeps/Godeps.json create mode 100644 Godeps/Readme create mode 100644 vendor/github.com/sabhiram/go-git-ignore/.gitignore create mode 100644 vendor/github.com/sabhiram/go-git-ignore/.travis.yml create mode 100644 vendor/github.com/sabhiram/go-git-ignore/LICENSE create mode 100644 vendor/github.com/sabhiram/go-git-ignore/README.md create mode 100644 vendor/github.com/sabhiram/go-git-ignore/ignore.go create mode 100644 vendor/github.com/soniakeys/graph/.gitignore create mode 100644 vendor/github.com/soniakeys/graph/.travis.yml create mode 100644 vendor/github.com/soniakeys/graph/adj.go create mode 100644 vendor/github.com/soniakeys/graph/adj_RO.go create mode 100644 vendor/github.com/soniakeys/graph/adj_cg.go create mode 100644 vendor/github.com/soniakeys/graph/bits.go create mode 100644 vendor/github.com/soniakeys/graph/bits32.go create mode 100644 vendor/github.com/soniakeys/graph/bits64.go create mode 100644 vendor/github.com/soniakeys/graph/dir.go create mode 100644 vendor/github.com/soniakeys/graph/dir_RO.go create mode 100644 vendor/github.com/soniakeys/graph/dir_cg.go create mode 100644 vendor/github.com/soniakeys/graph/doc.go create mode 100644 vendor/github.com/soniakeys/graph/fromlist.go create mode 100644 vendor/github.com/soniakeys/graph/graph.go create mode 100644 vendor/github.com/soniakeys/graph/hacking.md 
create mode 100644 vendor/github.com/soniakeys/graph/mst.go create mode 100644 vendor/github.com/soniakeys/graph/random.go create mode 100644 vendor/github.com/soniakeys/graph/readme.md create mode 100644 vendor/github.com/soniakeys/graph/sssp.go create mode 100644 vendor/github.com/soniakeys/graph/travis.sh create mode 100644 vendor/github.com/soniakeys/graph/undir.go create mode 100644 vendor/github.com/soniakeys/graph/undir_RO.go create mode 100644 vendor/github.com/soniakeys/graph/undir_cg.go create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/oauth2/.travis.yml create mode 100644 vendor/golang.org/x/oauth2/AUTHORS create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 vendor/golang.org/x/oauth2/LICENSE create mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/oauth2/client_appengine.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/internal/token.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 vendor/golang.org/x/oauth2/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/token.go create mode 100644 vendor/golang.org/x/oauth2/transport.go create mode 100644 vendor/google.golang.org/api/LICENSE create mode 100644 vendor/google.golang.org/api/drive/v3/drive-api.json create mode 100644 vendor/google.golang.org/api/drive/v3/drive-gen.go create mode 100644 vendor/google.golang.org/api/gensupport/backoff.go create mode 100644 vendor/google.golang.org/api/gensupport/buffer.go create mode 100644 vendor/google.golang.org/api/gensupport/doc.go create mode 100644 vendor/google.golang.org/api/gensupport/json.go create mode 100644 vendor/google.golang.org/api/gensupport/media.go create mode 100644 vendor/google.golang.org/api/gensupport/params.go create mode 100644 vendor/google.golang.org/api/gensupport/resumable.go create mode 100644 vendor/google.golang.org/api/gensupport/retry.go create mode 100644 vendor/google.golang.org/api/googleapi/googleapi.go create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go create mode 100644 vendor/google.golang.org/api/googleapi/types.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 00000000..e6f80325 --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,48 @@ +{ + "ImportPath": "github.com/prasmussen/gdrive", + "GoVersion": "go1.6", + "GodepVersion": "v61", + "Deps": [ + { + "ImportPath": "github.com/sabhiram/go-git-ignore", + "Rev": "228fcfa2a06e870a3ef238d54c45ea847f492a37" + }, + { + "ImportPath": "github.com/soniakeys/graph", + "Comment": "svg-v0-58-gc265d96", + "Rev": "c265d9676750b13b9520ba4ad4f8359fa1aed9fd" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": 
"fb93926129b8ec0056f2f458b1f519654814edf0" + }, + { + "ImportPath": "golang.org/x/net/context/ctxhttp", + "Rev": "fb93926129b8ec0056f2f458b1f519654814edf0" + }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "7e9cd5d59563851383f8f81a7fbb01213709387c" + }, + { + "ImportPath": "golang.org/x/oauth2/internal", + "Rev": "7e9cd5d59563851383f8f81a7fbb01213709387c" + }, + { + "ImportPath": "google.golang.org/api/drive/v3", + "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + }, + { + "ImportPath": "google.golang.org/api/gensupport", + "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + }, + { + "ImportPath": "google.golang.org/api/googleapi", + "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + }, + { + "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", + "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + } + ] +} diff --git a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 00000000..4cdaa53d --- /dev/null +++ b/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/vendor/github.com/sabhiram/go-git-ignore/.gitignore b/vendor/github.com/sabhiram/go-git-ignore/.gitignore new file mode 100644 index 00000000..0e919aff --- /dev/null +++ b/vendor/github.com/sabhiram/go-git-ignore/.gitignore @@ -0,0 +1,28 @@ +# Package test fixtures +test_fixtures + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + diff --git a/vendor/github.com/sabhiram/go-git-ignore/.travis.yml b/vendor/github.com/sabhiram/go-git-ignore/.travis.yml new file mode 100644 index 00000000..24ddadf1 --- /dev/null +++ b/vendor/github.com/sabhiram/go-git-ignore/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.3 + - tip + +env: + - "PATH=$HOME/gopath/bin:$PATH" + +before_install: + - go get github.com/stretchr/testify/assert + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + +script: + - go test -v -covermode=count -coverprofile=coverage.out + - goveralls -coverprofile=coverage.out -service travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/sabhiram/go-git-ignore/LICENSE b/vendor/github.com/sabhiram/go-git-ignore/LICENSE new file mode 100644 index 00000000..c606f49e --- /dev/null +++ b/vendor/github.com/sabhiram/go-git-ignore/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Shaba Abhiram + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/sabhiram/go-git-ignore/README.md b/vendor/github.com/sabhiram/go-git-ignore/README.md new file mode 100644 index 00000000..fbbb3761 --- /dev/null +++ b/vendor/github.com/sabhiram/go-git-ignore/README.md @@ -0,0 +1,17 @@ +# go-git-ignore + +[![Build Status](https://travis-ci.org/sabhiram/go-git-ignore.svg)](https://travis-ci.org/sabhiram/go-git-ignore) [![Coverage Status](https://coveralls.io/repos/sabhiram/go-git-ignore/badge.png?branch=master)](https://coveralls.io/r/sabhiram/go-git-ignore?branch=master) + +A gitignore parser for `Go` + +## Install + +```shell +go get github.com/sabhiram/go-git-ignore +``` + +## Usage + +```shell +TODO +``` diff --git a/vendor/github.com/sabhiram/go-git-ignore/ignore.go b/vendor/github.com/sabhiram/go-git-ignore/ignore.go new file mode 100644 index 00000000..e3241b2c --- /dev/null +++ b/vendor/github.com/sabhiram/go-git-ignore/ignore.go @@ -0,0 +1,200 @@ +/* +ignore is a library which returns a new ignorer object which can +test against various paths. This is particularly useful when trying +to filter files based on a .gitignore document + +The rules for parsing the input file are the same as the ones listed +in the Git docs here: http://git-scm.com/docs/gitignore + +The summarized version of the same has been copied here: + + 1. A blank line matches no files, so it can serve as a separator + for readability. + 2. A line starting with # serves as a comment. Put a backslash ("\") + in front of the first hash for patterns that begin with a hash. + 3. Trailing spaces are ignored unless they are quoted with backslash ("\"). + 4. An optional prefix "!" which negates the pattern; any matching file + excluded by a previous pattern will become included again. It is not + possible to re-include a file if a parent directory of that file is + excluded. Git doesn’t list excluded directories for performance reasons, + so any patterns on contained files have no effect, no matter where they + are defined. Put a backslash ("\") in front of the first "!" for + patterns that begin with a literal "!", for example, "\!important!.txt". + 5. If the pattern ends with a slash, it is removed for the purpose of the + following description, but it would only find a match with a directory. + In other words, foo/ will match a directory foo and paths underneath it, + but will not match a regular file or a symbolic link foo (this is + consistent with the way how pathspec works in general in Git). + 6. If the pattern does not contain a slash /, Git treats it as a shell glob + pattern and checks for a match against the pathname relative to the + location of the .gitignore file (relative to the toplevel of the work + tree if not from a .gitignore file). + 7. Otherwise, Git treats the pattern as a shell glob suitable for + consumption by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the + pattern will not match a / in the pathname. For example, + "Documentation/*.html" matches "Documentation/git.html" but not + "Documentation/ppc/ppc.html" or "tools/perf/Documentation/perf.html". + 8. A leading slash matches the beginning of the pathname. For example, + "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c". + 9. 
Two consecutive asterisks ("**") in patterns matched against full + pathname may have special meaning: + i. A leading "**" followed by a slash means match in all directories. + For example, "** /foo" matches file or directory "foo" anywhere, + the same as pattern "foo". "** /foo/bar" matches file or directory + "bar" anywhere that is directly under directory "foo". + ii. A trailing "/**" matches everything inside. For example, "abc/**" + matches all files inside directory "abc", relative to the location + of the .gitignore file, with infinite depth. + iii. A slash followed by two consecutive asterisks then a slash matches + zero or more directories. For example, "a/** /b" matches "a/b", + "a/x/b", "a/x/y/b" and so on. + iv. Other consecutive asterisks are considered invalid. */ +package ignore + +import ( + "io/ioutil" + "os" + "regexp" + "strings" +) + +// An IgnoreParser is an interface which exposes two methods: +// MatchesPath() - Returns true if the path is targeted by the patterns compiled in the GitIgnore structure +type IgnoreParser interface { + IncludesPath(f string) bool + IgnoresPath(f string) bool + MatchesPath(f string) bool +} + +// GitIgnore is a struct which contains a slice of regexp.Regexp +// patterns +type GitIgnore struct { + patterns []*regexp.Regexp // List of regexp patterns which this ignore file applies + negate []bool // List of booleans which determine if the pattern is negated +} + +// This function pretty much attempts to mimic the parsing rules +// listed above at the start of this file +func getPatternFromLine(line string) (*regexp.Regexp, bool) { + // Trim OS-specific carriage returns. + line = strings.TrimRight(line, "\r") + + // Strip comments [Rule 2] + if strings.HasPrefix(line, `#`) { + return nil, false + } + + // Trim string [Rule 3] + // TODO: Handle [Rule 3], when the " " is escaped with a \ + line = strings.Trim(line, " ") + + // Exit for no-ops and return nil which will prevent us from + // appending a pattern against this line + if line == "" { + return nil, false + } + + // TODO: Handle [Rule 4] which negates the match for patterns leading with "!" + negatePattern := false + if line[0] == '!' { + negatePattern = true + line = line[1:] + } + + // Handle [Rule 2, 4], when # or ! is escaped with a \ + // Handle [Rule 4] once we tag negatePattern, strip the leading ! char + if regexp.MustCompile(`^(\#|\!)`).MatchString(line) { + line = line[1:] + } + + // If we encounter a foo/*.blah in a folder, prepend the / char + if regexp.MustCompile(`([^\/+])/.*\*\.`).MatchString(line) && line[0] != '/' { + line = "/" + line + } + + // Handle escaping the "." char + line = regexp.MustCompile(`\.`).ReplaceAllString(line, `\.`) + + magicStar := "#$~" + + // Handle "/**/" usage + if strings.HasPrefix(line, "/**/") { + line = line[1:] + } + line = regexp.MustCompile(`/\*\*/`).ReplaceAllString(line, `(/|/.+/)`) + line = regexp.MustCompile(`\*\*/`).ReplaceAllString(line, `(|.`+magicStar+`/)`) + line = regexp.MustCompile(`/\*\*`).ReplaceAllString(line, `(|/.`+magicStar+`)`) + + // Handle escaping the "*" char + line = regexp.MustCompile(`\\\*`).ReplaceAllString(line, `\`+magicStar) + line = regexp.MustCompile(`\*`).ReplaceAllString(line, `([^/]*)`) + + // Handle escaping the "?" 
char + line = strings.Replace(line, "?", `\?`, -1) + + line = strings.Replace(line, magicStar, "*", -1) + + // Temporary regex + var expr = "" + if strings.HasSuffix(line, "/") { + expr = line + "(|.*)$" + } else { + expr = line + "(|/.*)$" + } + if strings.HasPrefix(expr, "/") { + expr = "^(|/)" + expr[1:] + } else { + expr = "^(|.*/)" + expr + } + pattern, _ := regexp.Compile(expr) + + return pattern, negatePattern +} + +// Accepts a variadic set of strings, and returns a GitIgnore object which +// converts and appends the lines in the input to regexp.Regexp patterns +// held within the GitIgnore objects "patterns" field +func CompileIgnoreLines(lines ...string) (*GitIgnore, error) { + g := new(GitIgnore) + for _, line := range lines { + pattern, negatePattern := getPatternFromLine(line) + if pattern != nil { + g.patterns = append(g.patterns, pattern) + g.negate = append(g.negate, negatePattern) + } + } + return g, nil +} + +// Accepts a ignore file as the input, parses the lines out of the file +// and invokes the CompileIgnoreLines method +func CompileIgnoreFile(fpath string) (*GitIgnore, error) { + buffer, error := ioutil.ReadFile(fpath) + if error == nil { + s := strings.Split(string(buffer), "\n") + return CompileIgnoreLines(s...) + } + return nil, error +} + +// MatchesPath is an interface function for the IgnoreParser interface. +// It returns true if the given GitIgnore structure would target a given +// path string "f" +func (g GitIgnore) MatchesPath(f string) bool { + // Replace OS-specific path separator. + f = strings.Replace(f, string(os.PathSeparator), "/", -1) + + matchesPath := false + for idx, pattern := range g.patterns { + if pattern.MatchString(f) { + // If this is a regular target (not negated with a gitignore exclude "!" etc) + if !g.negate[idx] { + matchesPath = true + // Negated pattern, and matchesPath is already set + } else if matchesPath { + matchesPath = false + } + } + } + return matchesPath +} diff --git a/vendor/github.com/soniakeys/graph/.gitignore b/vendor/github.com/soniakeys/graph/.gitignore new file mode 100644 index 00000000..3be61584 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/.gitignore @@ -0,0 +1,2 @@ +*.dot + diff --git a/vendor/github.com/soniakeys/graph/.travis.yml b/vendor/github.com/soniakeys/graph/.travis.yml new file mode 100644 index 00000000..bcc4f9fe --- /dev/null +++ b/vendor/github.com/soniakeys/graph/.travis.yml @@ -0,0 +1,8 @@ +sudo: false +language: go +# update travis.sh when changing version number here +go: + - 1.2.1 + - 1.6 +install: go get -t ./... +script: ./travis.sh diff --git a/vendor/github.com/soniakeys/graph/adj.go b/vendor/github.com/soniakeys/graph/adj.go new file mode 100644 index 00000000..165f365b --- /dev/null +++ b/vendor/github.com/soniakeys/graph/adj.go @@ -0,0 +1,325 @@ +// Copyright 2014 Sonia Keys +// License MIT: https://opensource.org/licenses/MIT + +package graph + +// adj.go contains methods on AdjacencyList and LabeledAdjacencyList. +// +// AdjacencyList methods are placed first and are alphabetized. +// LabeledAdjacencyList methods follow, also alphabetized. +// Only exported methods need be alphabetized; non-exported methods can +// be left near their use. + +import ( + "math" + "sort" +) + +// HasParallelSort identifies if a graph contains parallel arcs, multiple arcs +// that lead from a node to the same node. +// +// If the graph has parallel arcs, the results fr and to represent an example +// where there are parallel arcs from node fr to node to. 
+// +// If there are no parallel arcs, the method returns false -1 -1. +// +// Multiple loops on a node count as parallel arcs. +// +// "Sort" in the method name indicates that sorting is used to detect parallel +// arcs. Compared to method HasParallelMap, this may give better performance +// for small or sparse graphs but will have asymtotically worse performance for +// large dense graphs. +func (g AdjacencyList) HasParallelSort() (has bool, fr, to NI) { + var t NodeList + for n, to := range g { + if len(to) == 0 { + continue + } + // different code in the labeled version, so no code gen. + t = append(t[:0], to...) + sort.Sort(t) + t0 := t[0] + for _, to := range t[1:] { + if to == t0 { + return true, NI(n), t0 + } + t0 = to + } + } + return false, -1, -1 +} + +// IsUndirected returns true if g represents an undirected graph. +// +// Returns true when all non-loop arcs are paired in reciprocal pairs. +// Otherwise returns false and an example unpaired arc. +func (g AdjacencyList) IsUndirected() (u bool, from, to NI) { + // similar code in dot/writeUndirected + unpaired := make(AdjacencyList, len(g)) + for fr, to := range g { + arc: // for each arc in g + for _, to := range to { + if to == NI(fr) { + continue // loop + } + // search unpaired arcs + ut := unpaired[to] + for i, u := range ut { + if u == NI(fr) { // found reciprocal + last := len(ut) - 1 + ut[i] = ut[last] + unpaired[to] = ut[:last] + continue arc + } + } + // reciprocal not found + unpaired[fr] = append(unpaired[fr], to) + } + } + for fr, to := range unpaired { + if len(to) > 0 { + return false, NI(fr), to[0] + } + } + return true, -1, -1 +} + +// Edgelist constructs the edge list rerpresentation of a graph. +// +// An edge is returned for each arc of the graph. For undirected graphs +// this includes reciprocal edges. +// +// See also WeightedEdgeList method. +func (g LabeledAdjacencyList) EdgeList() (el []LabeledEdge) { + for fr, to := range g { + for _, to := range to { + el = append(el, LabeledEdge{Edge{NI(fr), to.To}, to.Label}) + } + } + return +} + +// FloydWarshall finds all pairs shortest distances for a simple weighted +// graph without negative cycles. +// +// In result array d, d[i][j] will be the shortest distance from node i +// to node j. Any diagonal element < 0 indicates a negative cycle exists. +// +// If g is an undirected graph with no negative edge weights, the result +// array will be a distance matrix, for example as used by package +// github.com/soniakeys/cluster. +func (g LabeledAdjacencyList) FloydWarshall(w WeightFunc) (d [][]float64) { + d = newFWd(len(g)) + for fr, to := range g { + for _, to := range to { + d[fr][to.To] = w(to.Label) + } + } + solveFW(d) + return +} + +// little helper function, makes a blank matrix for FloydWarshall. +func newFWd(n int) [][]float64 { + d := make([][]float64, n) + for i := range d { + di := make([]float64, n) + for j := range di { + if j != i { + di[j] = math.Inf(1) + } + } + d[i] = di + } + return d +} + +// Floyd Warshall solver, once the matrix d is initialized by arc weights. +func solveFW(d [][]float64) { + for k, dk := range d { + for _, di := range d { + dik := di[k] + for j := range d { + if d2 := dik + dk[j]; d2 < di[j] { + di[j] = d2 + } + } + } + } +} + +// HasArcLabel returns true if g has any arc from node fr to node to +// with label l. +// +// Also returned is the index within the slice of arcs from node fr. +// If no arc from fr to to is present, HasArcLabel returns false, -1. 
+func (g LabeledAdjacencyList) HasArcLabel(fr, to NI, l LI) (bool, int) { + t := Half{to, l} + for x, h := range g[fr] { + if h == t { + return true, x + } + } + return false, -1 +} + +// HasParallelSort identifies if a graph contains parallel arcs, multiple arcs +// that lead from a node to the same node. +// +// If the graph has parallel arcs, the results fr and to represent an example +// where there are parallel arcs from node fr to node to. +// +// If there are no parallel arcs, the method returns -1 -1. +// +// Multiple loops on a node count as parallel arcs. +// +// "Sort" in the method name indicates that sorting is used to detect parallel +// arcs. Compared to method HasParallelMap, this may give better performance +// for small or sparse graphs but will have asymtotically worse performance for +// large dense graphs. +func (g LabeledAdjacencyList) HasParallelSort() (has bool, fr, to NI) { + var t NodeList + for n, to := range g { + if len(to) == 0 { + continue + } + // slightly different code needed here compared to AdjacencyList + t = t[:0] + for _, to := range to { + t = append(t, to.To) + } + sort.Sort(t) + t0 := t[0] + for _, to := range t[1:] { + if to == t0 { + return true, NI(n), t0 + } + t0 = to + } + } + return false, -1, -1 +} + +// IsUndirected returns true if g represents an undirected graph. +// +// Returns true when all non-loop arcs are paired in reciprocal pairs with +// matching labels. Otherwise returns false and an example unpaired arc. +// +// Note the requirement that reciprocal pairs have matching labels is +// an additional test not present in the otherwise equivalent unlabeled version +// of IsUndirected. +func (g LabeledAdjacencyList) IsUndirected() (u bool, from NI, to Half) { + unpaired := make(LabeledAdjacencyList, len(g)) + for fr, to := range g { + arc: // for each arc in g + for _, to := range to { + if to.To == NI(fr) { + continue // loop + } + // search unpaired arcs + ut := unpaired[to.To] + for i, u := range ut { + if u.To == NI(fr) && u.Label == to.Label { // found reciprocal + last := len(ut) - 1 + ut[i] = ut[last] + unpaired[to.To] = ut[:last] + continue arc + } + } + // reciprocal not found + unpaired[fr] = append(unpaired[fr], to) + } + } + for fr, to := range unpaired { + if len(to) > 0 { + return false, NI(fr), to[0] + } + } + return true, -1, to +} + +// NegativeArc returns true if the receiver graph contains a negative arc. +func (g LabeledAdjacencyList) NegativeArc(w WeightFunc) bool { + for _, nbs := range g { + for _, nb := range nbs { + if w(nb.Label) < 0 { + return true + } + } + } + return false +} + +// Unlabeled constructs the unlabeled graph corresponding to g. +func (g LabeledAdjacencyList) Unlabeled() AdjacencyList { + a := make(AdjacencyList, len(g)) + for n, nbs := range g { + to := make([]NI, len(nbs)) + for i, nb := range nbs { + to[i] = nb.To + } + a[n] = to + } + return a +} + +// WeightedEdgeList constructs a WeightedEdgeList object from a +// LabeledAdjacencyList. +// +// Internally it calls g.EdgeList() to obtain the Edges member. +// See LabeledAdjacencyList.EdgeList(). +func (g LabeledAdjacencyList) WeightedEdgeList(w WeightFunc) *WeightedEdgeList { + return &WeightedEdgeList{ + Order: len(g), + WeightFunc: w, + Edges: g.EdgeList(), + } +} + +// WeightedInDegree computes the weighted in-degree of each node in g +// for a given weight function w. +// +// The weighted in-degree of a node is the sum of weights of arcs going to +// the node. +// +// A weighted degree of a node is often termed the "strength" of a node. 
+// +// See note for undirected graphs at LabeledAdjacencyList.WeightedOutDegree. +func (g LabeledAdjacencyList) WeightedInDegree(w WeightFunc) []float64 { + ind := make([]float64, len(g)) + for _, to := range g { + for _, to := range to { + ind[to.To] += w(to.Label) + } + } + return ind +} + +// WeightedOutDegree computes the weighted out-degree of the specified node +// for a given weight function w. +// +// The weighted out-degree of a node is the sum of weights of arcs going from +// the node. +// +// A weighted degree of a node is often termed the "strength" of a node. +// +// Note for undirected graphs, the WeightedOutDegree result for a node will +// equal the WeightedInDegree for the node. You can use WeightedInDegree if +// you have need for the weighted degrees of all nodes or use WeightedOutDegree +// to compute the weighted degrees of individual nodes. In either case loops +// are counted just once, unlike the (unweighted) UndirectedDegree methods. +func (g LabeledAdjacencyList) WeightedOutDegree(n NI, w WeightFunc) (d float64) { + for _, to := range g[n] { + d += w(to.Label) + } + return +} + +// More about loops and strength: I didn't see consensus on this especially +// in the case of undirected graphs. Some sources said to add in-degree and +// out-degree, which would seemingly double both loops and non-loops. +// Some said to double loops. Some said sum the edge weights and had no +// comment on loops. R of course makes everything an option. The meaning +// of "strength" where loops exist is unclear. So while I could write an +// UndirectedWeighted degree function that doubles loops but not edges, +// I'm going to just leave this for now. diff --git a/vendor/github.com/soniakeys/graph/adj_RO.go b/vendor/github.com/soniakeys/graph/adj_RO.go new file mode 100644 index 00000000..1d37d14e --- /dev/null +++ b/vendor/github.com/soniakeys/graph/adj_RO.go @@ -0,0 +1,387 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// adj_RO.go is code generated from adj_cg.go by directives in graph.go. +// Editing adj_cg.go is okay. +// DO NOT EDIT adj_RO.go. The RO is for Read Only. + +import ( + "math/rand" + "time" +) + +// ArcSize returns the number of arcs in g. +// +// Note that for an undirected graph without loops, the number of undirected +// edges -- the traditional meaning of graph size -- will be ArcSize()/2. +// On the other hand, if g is an undirected graph that has or may have loops, +// g.ArcSize()/2 is not a meaningful quantity. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) ArcSize() int { + m := 0 + for _, to := range g { + m += len(to) + } + return m +} + +// BoundsOk validates that all arcs in g stay within the slice bounds of g. +// +// BoundsOk returns true when no arcs point outside the bounds of g. +// Otherwise it returns false and an example arc that points outside of g. +// +// Most methods of this package assume the BoundsOk condition and may +// panic when they encounter an arc pointing outside of the graph. This +// function can be used to validate a graph when the BoundsOk condition +// is unknown. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g AdjacencyList) BoundsOk() (ok bool, fr NI, to NI) { + for fr, to := range g { + for _, to := range to { + if to < 0 || to >= NI(len(g)) { + return false, NI(fr), to + } + } + } + return true, -1, to +} + +// BreadthFirst traverses a directed or undirected graph in breadth first order. +// +// Argument start is the start node for the traversal. If r is nil, nodes are +// visited in deterministic order. If a random number generator is supplied, +// nodes at each level are visited in random order. +// +// Argument f can be nil if you have no interest in the FromList path result. +// If FromList f is non-nil, the method populates f.Paths and sets f.MaxLen. +// It does not set f.Leaves. For convenience argument f can be a zero value +// FromList. If f.Paths is nil, the FromList is initialized first. If f.Paths +// is non-nil however, the FromList is used as is. The method uses a value of +// PathEnd.Len == 0 to indentify unvisited nodes. Existing non-zero values +// will limit the traversal. +// +// Traversal calls the visitor function v for each node starting with node +// start. If v returns true, traversal continues. If v returns false, the +// traversal terminates immediately. PathEnd Len and From values are updated +// before calling the visitor function. +// +// On return f.Paths and f.MaxLen are set but not f.Leaves. +// +// Returned is the number of nodes visited and ok = true if the traversal +// ran to completion or ok = false if it was terminated by the visitor +// function returning false. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) BreadthFirst(start NI, r *rand.Rand, f *FromList, v OkNodeVisitor) (visited int, ok bool) { + switch { + case f == nil: + e := NewFromList(len(g)) + f = &e + case f.Paths == nil: + *f = NewFromList(len(g)) + } + rp := f.Paths + // the frontier consists of nodes all at the same level + frontier := []NI{start} + level := 1 + // assign path when node is put on frontier, + rp[start] = PathEnd{Len: level, From: -1} + for { + f.MaxLen = level + level++ + var next []NI + if r == nil { + for _, n := range frontier { + visited++ + if !v(n) { // visit nodes as they come off frontier + return + } + for _, nb := range g[n] { + if rp[nb].Len == 0 { + next = append(next, nb) + rp[nb] = PathEnd{From: n, Len: level} + } + } + } + } else { // take nodes off frontier at random + for _, i := range r.Perm(len(frontier)) { + n := frontier[i] + // remainder of block same as above + visited++ + if !v(n) { + return + } + for _, nb := range g[n] { + if rp[nb].Len == 0 { + next = append(next, nb) + rp[nb] = PathEnd{From: n, Len: level} + } + } + } + } + if len(next) == 0 { + break + } + frontier = next + } + return visited, true +} + +// BreadthFirstPath finds a single path from start to end with a minimum +// number of nodes. +// +// Returned is the path as list of nodes. +// The result is nil if no path was found. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) BreadthFirstPath(start, end NI) []NI { + var f FromList + g.BreadthFirst(start, nil, &f, func(n NI) bool { return n != end }) + return f.PathTo(end, nil) +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) Copy() (c AdjacencyList, ma int) { + c = make(AdjacencyList, len(g)) + for n, to := range g { + c[n] = append([]NI{}, to...) 
+ ma += len(to) + } + return +} + +// DepthFirst traverses a graph depth first. +// +// As it traverses it calls visitor function v for each node. If v returns +// false at any point, the traversal is terminated immediately and DepthFirst +// returns false. Otherwise DepthFirst returns true. +// +// DepthFirst uses argument bm is used as a bitmap to guide the traversal. +// For a complete traversal, bm should be 0 initially. During the +// traversal, bits are set corresponding to each node visited. +// The bit is set before calling the visitor function. +// +// Argument bm can be nil if you have no need for it. +// In this case a bitmap is created internally for one-time use. +// +// Alternatively v can be nil. In this case traversal still proceeds and +// updates the bitmap, which can be a useful result. +// DepthFirst always returns true in this case. +// +// It makes no sense for both bm and v to be nil. In this case DepthFirst +// returns false immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) DepthFirst(start NI, bm *Bits, v OkNodeVisitor) (ok bool) { + if bm == nil { + if v == nil { + return false + } + bm = &Bits{} + } + var df func(n NI) bool + df = func(n NI) bool { + if bm.Bit(n) == 1 { + return true + } + bm.SetBit(n, 1) + if v != nil && !v(n) { + return false + } + for _, nb := range g[n] { + if !df(nb) { + return false + } + } + return true + } + return df(start) +} + +// DepthFirstRandom traverses a graph depth first, but following arcs in +// random order among arcs from a single node. +// +// If Rand r is nil, the method creates a new source and generator for +// one-time use. +// +// Usage is otherwise like the DepthFirst method. See DepthFirst. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) DepthFirstRandom(start NI, bm *Bits, v OkNodeVisitor, r *rand.Rand) (ok bool) { + if bm == nil { + if v == nil { + return false + } + bm = &Bits{} + } + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + var df func(n NI) bool + df = func(n NI) bool { + if bm.Bit(n) == 1 { + return true + } + bm.SetBit(n, 1) + if v != nil && !v(n) { + return false + } + to := g[n] + for _, i := range r.Perm(len(to)) { + if !df(to[i]) { + return false + } + } + return true + } + return df(start) +} + +// HasArc returns true if g has any arc from node fr to node to. +// +// Also returned is the index within the slice of arcs from node fr. +// If no arc from fr to to is present, HasArc returns false, -1. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) HasArc(fr, to NI) (bool, int) { + for x, h := range g[fr] { + if h == to { + return true, x + } + } + return false, -1 +} + +// HasLoop identifies if a graph contains a loop, an arc that leads from a +// a node back to the same node. +// +// If the graph has a loop, the result is an example node that has a loop. +// +// If g contains a loop, the method returns true and an example of a node +// with a loop. If there are no loops in g, the method returns false, -1. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) HasLoop() (bool, NI) { + for fr, to := range g { + for _, to := range to { + if NI(fr) == to { + return true, to + } + } + } + return false, -1 +} + +// HasParallelMap identifies if a graph contains parallel arcs, multiple arcs +// that lead from a node to the same node. 
+// +// If the graph has parallel arcs, the method returns true and +// results fr and to represent an example where there are parallel arcs +// from node fr to node to. +// +// If there are no parallel arcs, the method returns false, -1 -1. +// +// Multiple loops on a node count as parallel arcs. +// +// "Map" in the method name indicates that a Go map is used to detect parallel +// arcs. Compared to method HasParallelSort, this gives better asymtotic +// performance for large dense graphs but may have increased overhead for +// small or sparse graphs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) HasParallelMap() (has bool, fr, to NI) { + for n, to := range g { + if len(to) == 0 { + continue + } + m := map[NI]struct{}{} + for _, to := range to { + if _, ok := m[to]; ok { + return true, NI(n), to + } + m[to] = struct{}{} + } + } + return false, -1, -1 +} + +// IsSimple checks for loops and parallel arcs. +// +// A graph is "simple" if it has no loops or parallel arcs. +// +// IsSimple returns true, -1 for simple graphs. If a loop or parallel arc is +// found, simple returns false and a node that represents a counterexample +// to the graph being simple. +// +// See also separate methods HasLoop and HasParallel. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) IsSimple() (ok bool, n NI) { + if lp, n := g.HasLoop(); lp { + return false, n + } + if pa, n, _ := g.HasParallelSort(); pa { + return false, n + } + return true, -1 +} + +// IsolatedNodes returns a bitmap of isolated nodes in receiver graph g. +// +// An isolated node is one with no arcs going to or from it. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) IsolatedNodes() (i Bits) { + i.SetAll(len(g)) + for fr, to := range g { + if len(to) > 0 { + i.SetBit(NI(fr), 0) + for _, to := range to { + i.SetBit(to, 0) + } + } + } + return +} + +/* +MaxmimalClique finds a maximal clique containing the node n. + +Not sure this is good for anything. It produces a single maximal clique +but there can be multiple maximal cliques containing a given node. +This algorithm just returns one of them, not even necessarily the +largest one. + +func (g LabeledAdjacencyList) MaximalClique(n int) []int { + c := []int{n} + var m bitset.BitSet + m.Set(uint(n)) + for fr, to := range g { + if fr == n { + continue + } + if len(to) < len(c) { + continue + } + f := 0 + for _, to := range to { + if m.Test(uint(to.To)) { + f++ + if f == len(c) { + c = append(c, to.To) + m.Set(uint(to.To)) + break + } + } + } + } + return c +} +*/ diff --git a/vendor/github.com/soniakeys/graph/adj_cg.go b/vendor/github.com/soniakeys/graph/adj_cg.go new file mode 100644 index 00000000..a484ee04 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/adj_cg.go @@ -0,0 +1,387 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// adj_RO.go is code generated from adj_cg.go by directives in graph.go. +// Editing adj_cg.go is okay. +// DO NOT EDIT adj_RO.go. The RO is for Read Only. + +import ( + "math/rand" + "time" +) + +// ArcSize returns the number of arcs in g. +// +// Note that for an undirected graph without loops, the number of undirected +// edges -- the traditional meaning of graph size -- will be ArcSize()/2. +// On the other hand, if g is an undirected graph that has or may have loops, +// g.ArcSize()/2 is not a meaningful quantity. 
+// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) ArcSize() int { + m := 0 + for _, to := range g { + m += len(to) + } + return m +} + +// BoundsOk validates that all arcs in g stay within the slice bounds of g. +// +// BoundsOk returns true when no arcs point outside the bounds of g. +// Otherwise it returns false and an example arc that points outside of g. +// +// Most methods of this package assume the BoundsOk condition and may +// panic when they encounter an arc pointing outside of the graph. This +// function can be used to validate a graph when the BoundsOk condition +// is unknown. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) BoundsOk() (ok bool, fr NI, to Half) { + for fr, to := range g { + for _, to := range to { + if to.To < 0 || to.To >= NI(len(g)) { + return false, NI(fr), to + } + } + } + return true, -1, to +} + +// BreadthFirst traverses a directed or undirected graph in breadth first order. +// +// Argument start is the start node for the traversal. If r is nil, nodes are +// visited in deterministic order. If a random number generator is supplied, +// nodes at each level are visited in random order. +// +// Argument f can be nil if you have no interest in the FromList path result. +// If FromList f is non-nil, the method populates f.Paths and sets f.MaxLen. +// It does not set f.Leaves. For convenience argument f can be a zero value +// FromList. If f.Paths is nil, the FromList is initialized first. If f.Paths +// is non-nil however, the FromList is used as is. The method uses a value of +// PathEnd.Len == 0 to indentify unvisited nodes. Existing non-zero values +// will limit the traversal. +// +// Traversal calls the visitor function v for each node starting with node +// start. If v returns true, traversal continues. If v returns false, the +// traversal terminates immediately. PathEnd Len and From values are updated +// before calling the visitor function. +// +// On return f.Paths and f.MaxLen are set but not f.Leaves. +// +// Returned is the number of nodes visited and ok = true if the traversal +// ran to completion or ok = false if it was terminated by the visitor +// function returning false. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g LabeledAdjacencyList) BreadthFirst(start NI, r *rand.Rand, f *FromList, v OkNodeVisitor) (visited int, ok bool) { + switch { + case f == nil: + e := NewFromList(len(g)) + f = &e + case f.Paths == nil: + *f = NewFromList(len(g)) + } + rp := f.Paths + // the frontier consists of nodes all at the same level + frontier := []NI{start} + level := 1 + // assign path when node is put on frontier, + rp[start] = PathEnd{Len: level, From: -1} + for { + f.MaxLen = level + level++ + var next []NI + if r == nil { + for _, n := range frontier { + visited++ + if !v(n) { // visit nodes as they come off frontier + return + } + for _, nb := range g[n] { + if rp[nb.To].Len == 0 { + next = append(next, nb.To) + rp[nb.To] = PathEnd{From: n, Len: level} + } + } + } + } else { // take nodes off frontier at random + for _, i := range r.Perm(len(frontier)) { + n := frontier[i] + // remainder of block same as above + visited++ + if !v(n) { + return + } + for _, nb := range g[n] { + if rp[nb.To].Len == 0 { + next = append(next, nb.To) + rp[nb.To] = PathEnd{From: n, Len: level} + } + } + } + } + if len(next) == 0 { + break + } + frontier = next + } + return visited, true +} + +// BreadthFirstPath finds a single path from start to end with a minimum +// number of nodes. +// +// Returned is the path as list of nodes. +// The result is nil if no path was found. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) BreadthFirstPath(start, end NI) []NI { + var f FromList + g.BreadthFirst(start, nil, &f, func(n NI) bool { return n != end }) + return f.PathTo(end, nil) +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) Copy() (c LabeledAdjacencyList, ma int) { + c = make(LabeledAdjacencyList, len(g)) + for n, to := range g { + c[n] = append([]Half{}, to...) + ma += len(to) + } + return +} + +// DepthFirst traverses a graph depth first. +// +// As it traverses it calls visitor function v for each node. If v returns +// false at any point, the traversal is terminated immediately and DepthFirst +// returns false. Otherwise DepthFirst returns true. +// +// DepthFirst uses argument bm is used as a bitmap to guide the traversal. +// For a complete traversal, bm should be 0 initially. During the +// traversal, bits are set corresponding to each node visited. +// The bit is set before calling the visitor function. +// +// Argument bm can be nil if you have no need for it. +// In this case a bitmap is created internally for one-time use. +// +// Alternatively v can be nil. In this case traversal still proceeds and +// updates the bitmap, which can be a useful result. +// DepthFirst always returns true in this case. +// +// It makes no sense for both bm and v to be nil. In this case DepthFirst +// returns false immediately. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g LabeledAdjacencyList) DepthFirst(start NI, bm *Bits, v OkNodeVisitor) (ok bool) { + if bm == nil { + if v == nil { + return false + } + bm = &Bits{} + } + var df func(n NI) bool + df = func(n NI) bool { + if bm.Bit(n) == 1 { + return true + } + bm.SetBit(n, 1) + if v != nil && !v(n) { + return false + } + for _, nb := range g[n] { + if !df(nb.To) { + return false + } + } + return true + } + return df(start) +} + +// DepthFirstRandom traverses a graph depth first, but following arcs in +// random order among arcs from a single node. +// +// If Rand r is nil, the method creates a new source and generator for +// one-time use. +// +// Usage is otherwise like the DepthFirst method. See DepthFirst. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) DepthFirstRandom(start NI, bm *Bits, v OkNodeVisitor, r *rand.Rand) (ok bool) { + if bm == nil { + if v == nil { + return false + } + bm = &Bits{} + } + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + var df func(n NI) bool + df = func(n NI) bool { + if bm.Bit(n) == 1 { + return true + } + bm.SetBit(n, 1) + if v != nil && !v(n) { + return false + } + to := g[n] + for _, i := range r.Perm(len(to)) { + if !df(to[i].To) { + return false + } + } + return true + } + return df(start) +} + +// HasArc returns true if g has any arc from node fr to node to. +// +// Also returned is the index within the slice of arcs from node fr. +// If no arc from fr to to is present, HasArc returns false, -1. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) HasArc(fr, to NI) (bool, int) { + for x, h := range g[fr] { + if h.To == to { + return true, x + } + } + return false, -1 +} + +// HasLoop identifies if a graph contains a loop, an arc that leads from a +// a node back to the same node. +// +// If the graph has a loop, the result is an example node that has a loop. +// +// If g contains a loop, the method returns true and an example of a node +// with a loop. If there are no loops in g, the method returns false, -1. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) HasLoop() (bool, NI) { + for fr, to := range g { + for _, to := range to { + if NI(fr) == to.To { + return true, to.To + } + } + } + return false, -1 +} + +// HasParallelMap identifies if a graph contains parallel arcs, multiple arcs +// that lead from a node to the same node. +// +// If the graph has parallel arcs, the method returns true and +// results fr and to represent an example where there are parallel arcs +// from node fr to node to. +// +// If there are no parallel arcs, the method returns false, -1 -1. +// +// Multiple loops on a node count as parallel arcs. +// +// "Map" in the method name indicates that a Go map is used to detect parallel +// arcs. Compared to method HasParallelSort, this gives better asymtotic +// performance for large dense graphs but may have increased overhead for +// small or sparse graphs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) HasParallelMap() (has bool, fr, to NI) { + for n, to := range g { + if len(to) == 0 { + continue + } + m := map[NI]struct{}{} + for _, to := range to { + if _, ok := m[to.To]; ok { + return true, NI(n), to.To + } + m[to.To] = struct{}{} + } + } + return false, -1, -1 +} + +// IsSimple checks for loops and parallel arcs. 
+// +// A graph is "simple" if it has no loops or parallel arcs. +// +// IsSimple returns true, -1 for simple graphs. If a loop or parallel arc is +// found, simple returns false and a node that represents a counterexample +// to the graph being simple. +// +// See also separate methods HasLoop and HasParallel. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) IsSimple() (ok bool, n NI) { + if lp, n := g.HasLoop(); lp { + return false, n + } + if pa, n, _ := g.HasParallelSort(); pa { + return false, n + } + return true, -1 +} + +// IsolatedNodes returns a bitmap of isolated nodes in receiver graph g. +// +// An isolated node is one with no arcs going to or from it. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) IsolatedNodes() (i Bits) { + i.SetAll(len(g)) + for fr, to := range g { + if len(to) > 0 { + i.SetBit(NI(fr), 0) + for _, to := range to { + i.SetBit(to.To, 0) + } + } + } + return +} + +/* +MaxmimalClique finds a maximal clique containing the node n. + +Not sure this is good for anything. It produces a single maximal clique +but there can be multiple maximal cliques containing a given node. +This algorithm just returns one of them, not even necessarily the +largest one. + +func (g LabeledAdjacencyList) MaximalClique(n int) []int { + c := []int{n} + var m bitset.BitSet + m.Set(uint(n)) + for fr, to := range g { + if fr == n { + continue + } + if len(to) < len(c) { + continue + } + f := 0 + for _, to := range to { + if m.Test(uint(to.To)) { + f++ + if f == len(c) { + c = append(c, to.To) + m.Set(uint(to.To)) + break + } + } + } + } + return c +} +*/ diff --git a/vendor/github.com/soniakeys/graph/bits.go b/vendor/github.com/soniakeys/graph/bits.go new file mode 100644 index 00000000..b86703ca --- /dev/null +++ b/vendor/github.com/soniakeys/graph/bits.go @@ -0,0 +1,207 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +import ( + "fmt" + "math/big" +) + +// Bits is bitmap, or bitset, intended to store a single bit of information +// per node of a graph. +// +// The current implementation is backed by a big.Int and so is a reference +// type in the same way a big.Int is. +type Bits struct { + i big.Int +} + +// NewBits constructs a Bits value with the bits ns set to 1. +func NewBits(ns ...NI) (b Bits) { + for _, n := range ns { + b.SetBit(n, 1) + } + return +} + +// AllNot sets n bits of z to the complement of x. +// +// It is a convenience method for SetAll followed by AndNot. +func (z *Bits) AllNot(n int, x Bits) { + var y Bits + y.SetAll(n) + z.AndNot(y, x) +} + +// And sets z = x & y. +func (z *Bits) And(x, y Bits) { + z.i.And(&x.i, &y.i) +} + +// AndNot sets z = x &^ y. +func (z *Bits) AndNot(x, y Bits) { + z.i.AndNot(&x.i, &y.i) +} + +// Bit returns the value of the n'th bit of x. +func (b Bits) Bit(n NI) uint { + return b.i.Bit(int(n)) +} + +// Clear sets all bits to 0. +func (z *Bits) Clear() { + *z = Bits{} +} + +// Format satisfies fmt.Formatter for fmt.Printf and related methods. +// +// graph.Bits format exactly like big.Ints. +func (b Bits) Format(s fmt.State, ch rune) { + b.i.Format(s, ch) +} + +// From returns the position of the first 1 bit at or after (from) position n. +// +// It returns -1 if there is no one bit at or after position n. +// +// This provides one way to iterate over one bits. 
+// To iterate over the one bits, call with n = 0 to get the the first +// one bit, then call with the result + 1 to get successive one bits. +// Unlike the Iterate method, this technique is stateless and so allows +// bits to be changed between successive calls. +// +// See also Iterate. +// +// (From is just a short word that means "at or after" here; +// it has nothing to do with arc direction.) +func (b Bits) From(n NI) NI { + words := b.i.Bits() + i := int(n) + x := i >> wordExp // x now index of word containing bit i. + if x >= len(words) { + return -1 + } + // test for 1 in this word at or after n + if wx := words[x] >> (uint(i) & (wordSize - 1)); wx != 0 { + return n + NI(trailingZeros(wx)) + } + x++ + for y, wy := range words[x:] { + if wy != 0 { + return NI((x+y)<>= uint(t + 1) + if w == 0 { + break + } + t = trailingZeros(w) + i += 1 + t + } + } + } + return true +} + +// Or sets z = x | y. +func (z *Bits) Or(x, y Bits) { + z.i.Or(&x.i, &y.i) +} + +// PopCount returns the number of 1 bits. +func (b Bits) PopCount() (c int) { + // algorithm selected to be efficient for sparse bit sets. + for _, w := range b.i.Bits() { + for w != 0 { + w &= w - 1 + c++ + } + } + return +} + +// Set sets the bits of z to the bits of x. +func (z *Bits) Set(x Bits) { + z.i.Set(&x.i) +} + +var one = big.NewInt(1) + +// SetAll sets z to have n 1 bits. +// +// It's useful for initializing z to have a 1 for each node of a graph. +func (z *Bits) SetAll(n int) { + z.i.Sub(z.i.Lsh(one, uint(n)), one) +} + +// SetBit sets the n'th bit to b, where be is a 0 or 1. +func (z *Bits) SetBit(n NI, b uint) { + z.i.SetBit(&z.i, int(n), b) +} + +// Single returns true if b has exactly one 1 bit. +func (b Bits) Single() bool { + // like PopCount, but stop as soon as two are found + c := 0 + for _, w := range b.i.Bits() { + for w != 0 { + w &= w - 1 + c++ + if c == 2 { + return false + } + } + } + return c == 1 +} + +// Slice returns a slice with the positions of each 1 bit. +func (b Bits) Slice() (s []NI) { + // (alternative implementation might use Popcount and make to get the + // exact cap slice up front. unclear if that would be better.) + b.Iterate(func(n NI) bool { + s = append(s, n) + return true + }) + return +} + +// Xor sets z = x ^ y. +func (z *Bits) Xor(x, y Bits) { + z.i.Xor(&x.i, &y.i) +} + +// Zero returns true if there are no 1 bits. +func (b Bits) Zero() bool { + return len(b.i.Bits()) == 0 +} + +// trailingZeros returns the number of trailing 0 bits in v. +// +// If v is 0, it returns 0. 
+func trailingZeros(v big.Word) int { + return deBruijnBits[v&-v*deBruijnMultiple>>deBruijnShift] +} diff --git a/vendor/github.com/soniakeys/graph/bits32.go b/vendor/github.com/soniakeys/graph/bits32.go new file mode 100644 index 00000000..18e07f9a --- /dev/null +++ b/vendor/github.com/soniakeys/graph/bits32.go @@ -0,0 +1,23 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +// +build 386 arm + +package graph + +// "word" here is math/big.Word +const ( + wordSize = 32 + wordExp = 5 // 2^5 = 32 +) + +// deBruijn magic numbers used in trailingZeros() +// +// reference: http://graphics.stanford.edu/~seander/bithacks.html +const deBruijnMultiple = 0x077CB531 +const deBruijnShift = 27 + +var deBruijnBits = []int{ + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9, +} diff --git a/vendor/github.com/soniakeys/graph/bits64.go b/vendor/github.com/soniakeys/graph/bits64.go new file mode 100644 index 00000000..ab601dd6 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/bits64.go @@ -0,0 +1,22 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +// +build !386,!arm + +package graph + +const ( + wordSize = 64 + wordExp = 6 // 2^6 = 64 +) + +// reference: http://graphics.stanford.edu/~seander/bithacks.html +const deBruijnMultiple = 0x03f79d71b4ca8b09 +const deBruijnShift = 58 + +var deBruijnBits = []int{ + 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, + 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, + 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, + 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, +} diff --git a/vendor/github.com/soniakeys/graph/dir.go b/vendor/github.com/soniakeys/graph/dir.go new file mode 100644 index 00000000..508306d1 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/dir.go @@ -0,0 +1,538 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// dir.go has methods specific to directed graphs, types Directed and +// LabeledDirected. +// +// Methods on Directed are first, with exported methods alphabetized. + +import "errors" + +// DAGMaxLenPath finds a maximum length path in a directed acyclic graph. +// +// Argument ordering must be a topological ordering of g. +func (g Directed) DAGMaxLenPath(ordering []NI) (path []NI) { + // dynamic programming. visit nodes in reverse order. for each, compute + // longest path as one plus longest of 'to' nodes. + // Visits each arc once. O(m). + // + // Similar code in label.go + var n NI + mlp := make([][]NI, len(g.AdjacencyList)) // index by node number + for i := len(ordering) - 1; i >= 0; i-- { + fr := ordering[i] // node number + to := g.AdjacencyList[fr] + if len(to) == 0 { + continue + } + mt := to[0] + for _, to := range to[1:] { + if len(mlp[to]) > len(mlp[mt]) { + mt = to + } + } + p := append([]NI{mt}, mlp[mt]...) + mlp[fr] = p + if len(p) > len(path) { + n = fr + path = p + } + } + return append([]NI{n}, path...) +} + +// EulerianCycle finds an Eulerian cycle in a directed multigraph. +// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The cycle result is a list of nodes, where the first and last +// nodes are the same. +// +// * Otherwise, result is nil, error +// +// Internally, EulerianCycle copies the entire graph g. +// See EulerianCycleD for a more space efficient version. 
+func (g Directed) EulerianCycle() ([]NI, error) { + c, m := g.Copy() + return c.EulerianCycleD(m) +} + +// EulerianCycleD finds an Eulerian cycle in a directed multigraph. +// +// EulerianCycleD is destructive on its receiver g. See EulerianCycle for +// a non-destructive version. +// +// Argument ma must be the correct arc size, or number of arcs in g. +// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The cycle result is a list of nodes, where the first and last +// nodes are the same. +// +// * Otherwise, result is nil, error +func (g Directed) EulerianCycleD(ma int) ([]NI, error) { + if len(g.AdjacencyList) == 0 { + return nil, nil + } + e := newEulerian(g.AdjacencyList, ma) + for e.s >= 0 { + v := e.top() // v is node that starts cycle + e.push() + // if Eulerian, we'll always come back to starting node + if e.top() != v { + return nil, errors.New("not balanced") + } + e.keep() + } + if !e.uv.Zero() { + return nil, errors.New("not strongly connected") + } + return e.p, nil +} + +// EulerianPath finds an Eulerian path in a directed multigraph. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path, result is an Eulerian path with err = nil. +// The path result is a list of nodes, where the first node is start. +// +// * Otherwise, result is nil, error +// +// Internally, EulerianPath copies the entire graph g. +// See EulerianPathD for a more space efficient version. +func (g Directed) EulerianPath() ([]NI, error) { + ind := g.InDegree() + var start NI + for n, to := range g.AdjacencyList { + if len(to) > ind[n] { + start = NI(n) + break + } + } + c, m := g.Copy() + return c.EulerianPathD(m, start) +} + +// EulerianPathD finds an Eulerian path in a directed multigraph. +// +// EulerianPathD is destructive on its receiver g. See EulerianPath for +// a non-destructive version. +// +// Argument ma must be the correct arc size, or number of arcs in g. +// Argument start must be a valid start node for the path. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path, result is an Eulerian path with err = nil. +// The path result is a list of nodes, where the first node is start. +// +// * Otherwise, result is nil, error +func (g Directed) EulerianPathD(ma int, start NI) ([]NI, error) { + if len(g.AdjacencyList) == 0 { + return nil, nil + } + e := newEulerian(g.AdjacencyList, ma) + e.p[0] = start + // unlike EulerianCycle, the first path doesn't have be a cycle. + e.push() + e.keep() + for e.s >= 0 { + start = e.top() + e.push() + // paths after the first must be cycles though + // (as long as there are nodes on the stack) + if e.top() != start { + return nil, errors.New("no Eulerian path") + } + e.keep() + } + if !e.uv.Zero() { + return nil, errors.New("no Eulerian path") + } + return e.p, nil +} + +// starting at the node on the top of the stack, follow arcs until stuck. +// mark nodes visited, push nodes on stack, remove arcs from g. +func (e *eulerian) push() { + for u := e.top(); ; { + e.uv.SetBit(u, 0) // reset unvisited bit + arcs := e.g[u] + if len(arcs) == 0 { + return // stuck + } + w := arcs[0] // follow first arc + e.s++ // push followed node on stack + e.p[e.s] = w + e.g[u] = arcs[1:] // consume arc + u = w + } +} + +// like push, but for for undirected graphs. 
+func (e *eulerian) pushUndir() { + for u := e.top(); ; { + e.uv.SetBit(u, 0) + arcs := e.g[u] + if len(arcs) == 0 { + return + } + w := arcs[0] + e.s++ + e.p[e.s] = w + e.g[u] = arcs[1:] // consume arc + // here is the only difference, consume reciprocal arc as well: + a2 := e.g[w] + for x, rx := range a2 { + if rx == u { // here it is + last := len(a2) - 1 + a2[x] = a2[last] // someone else gets the seat + e.g[w] = a2[:last] // and it's gone. + break + } + } + u = w + } +} + +// starting with the node on top of the stack, move nodes with no arcs. +func (e *eulerian) keep() { + for e.s >= 0 { + n := e.top() + if len(e.g[n]) > 0 { + break + } + e.p[e.m] = n + e.s-- + e.m-- + } +} + +type eulerian struct { + g AdjacencyList // working copy of graph, it gets consumed + m int // number of arcs in g, updated as g is consumed + uv Bits // unvisited + // low end of p is stack of unfinished nodes + // high end is finished path + p []NI // stack + path + s int // stack pointer +} + +func (e *eulerian) top() NI { + return e.p[e.s] +} + +func newEulerian(g AdjacencyList, m int) *eulerian { + e := &eulerian{ + g: g, + m: m, + p: make([]NI, m+1), + } + e.uv.SetAll(len(g)) + return e +} + +// MaximalNonBranchingPaths finds all paths in a directed graph that are +// "maximal" and "non-branching". +// +// A non-branching path is one where path nodes other than the first and last +// have exactly one arc leading to the node and one arc leading from the node, +// thus there is no possibility to branch away to a different path. +// +// A maximal non-branching path cannot be extended to a longer non-branching +// path by including another node at either end. +// +// In the case of a cyclic non-branching path, the first and last elements +// of the path will be the same node, indicating an isolated cycle. +// +// The method calls the emit argument for each path or isolated cycle in g, +// as long as emit returns true. If emit returns false, +// MaximalNonBranchingPaths returns immediately. +func (g Directed) MaximalNonBranchingPaths(emit func([]NI) bool) { + ind := g.InDegree() + var uv Bits + uv.SetAll(len(g.AdjacencyList)) + for v, vTo := range g.AdjacencyList { + if !(ind[v] == 1 && len(vTo) == 1) { + for _, w := range vTo { + n := []NI{NI(v), w} + uv.SetBit(NI(v), 0) + uv.SetBit(w, 0) + wTo := g.AdjacencyList[w] + for ind[w] == 1 && len(wTo) == 1 { + u := wTo[0] + n = append(n, u) + uv.SetBit(u, 0) + w = u + wTo = g.AdjacencyList[w] + } + if !emit(n) { // n is a path + return + } + } + } + } + // use uv.From rather than uv.Iterate. + // Iterate doesn't work here because we're modifying uv + for b := uv.From(0); b >= 0; b = uv.From(b + 1) { + v := NI(b) + n := []NI{v} + for w := v; ; { + w = g.AdjacencyList[w][0] + uv.SetBit(w, 0) + n = append(n, w) + if w == v { + break + } + } + if !emit(n) { // n is an isolated cycle + return + } + } +} + +// Undirected returns copy of g augmented as needed to make it undirected. 
+func (g Directed) Undirected() Undirected { + c, _ := g.AdjacencyList.Copy() // start with a copy + rw := make(AdjacencyList, len(g.AdjacencyList)) // "reciprocals wanted" + for fr, to := range g.AdjacencyList { + arc: // for each arc in g + for _, to := range to { + if to == NI(fr) { + continue // loop + } + // search wanted arcs + wf := rw[fr] + for i, w := range wf { + if w == to { // found, remove + last := len(wf) - 1 + wf[i] = wf[last] + rw[fr] = wf[:last] + continue arc + } + } + // arc not found, add to reciprocal to wanted list + rw[to] = append(rw[to], NI(fr)) + } + } + // add missing reciprocals + for fr, to := range rw { + c[fr] = append(c[fr], to...) + } + return Undirected{c} +} + +// StronglyConnectedComponents identifies strongly connected components +// in a directed graph. +// +// Algorithm by David J. Pearce, from "An Improved Algorithm for Finding the +// Strongly Connected Components of a Directed Graph". It is algorithm 3, +// PEA_FIND_SCC2 in +// http://homepages.mcs.vuw.ac.nz/~djp/files/P05.pdf, accessed 22 Feb 2015. +// +// Returned is a list of components, each component is a list of nodes. +/* +func (g Directed) StronglyConnectedComponents() []int { + rindex := make([]int, len(g)) + S := []int{} + index := 1 + c := len(g) - 1 + visit := func(v int) { + root := true + rindex[v] = index + index++ + for _, w := range g[v] { + if rindex[w] == 0 { + visit(w) + } + if rindex[w] < rindex[v] { + rindex[v] = rindex[w] + root = false + } + } + if root { + index-- + for top := len(S) - 1; top >= 0 && rindex[v] <= rindex[top]; top-- { + w = rindex[top] + S = S[:top] + rindex[w] = c + index-- + } + rindex[v] = c + c-- + } else { + S = append(S, v) + } + } + for v := range g { + if rindex[v] == 0 { + visit(v) + } + } + return rindex +} +*/ + +// Transpose constructs a new adjacency list with all arcs reversed. +// +// For every arc from->to of g, the result will have an arc to->from. +// Transpose also counts arcs as it traverses and returns ma the number of arcs +// in g (equal to the number of arcs in the result.) +func (g Directed) Transpose() (t Directed, ma int) { + ta := make(AdjacencyList, len(g.AdjacencyList)) + for n, nbs := range g.AdjacencyList { + for _, nb := range nbs { + ta[nb] = append(ta[nb], NI(n)) + ma++ + } + } + return Directed{ta}, ma +} + +// DAGMaxLenPath finds a maximum length path in a directed acyclic graph. +// +// Length here means number of nodes or arcs, not a sum of arc weights. +// +// Argument ordering must be a topological ordering of g. +// +// Returned is a node beginning a maximum length path, and a path of arcs +// starting from that node. +func (g LabeledDirected) DAGMaxLenPath(ordering []NI) (n NI, path []Half) { + // dynamic programming. visit nodes in reverse order. for each, compute + // longest path as one plus longest of 'to' nodes. + // Visits each arc once. Time complexity O(m). + // + // Similar code in dir.go. + mlp := make([][]Half, len(g.LabeledAdjacencyList)) // index by node number + for i := len(ordering) - 1; i >= 0; i-- { + fr := ordering[i] // node number + to := g.LabeledAdjacencyList[fr] + if len(to) == 0 { + continue + } + mt := to[0] + for _, to := range to[1:] { + if len(mlp[to.To]) > len(mlp[mt.To]) { + mt = to + } + } + p := append([]Half{mt}, mlp[mt.To]...) + mlp[fr] = p + if len(p) > len(path) { + n = fr + path = p + } + } + return +} + +// FromListLabels transposes a labeled graph into a FromList and associated +// list of labels. +// +// Receiver g should be connected as a tree or forest. 
Specifically no node +// can have multiple incoming arcs. If any node n in g has multiple incoming +// arcs, the method returns (nil, nil, n) where n is a node with multiple +// incoming arcs. +// +// Otherwise (normally) the method populates the From members in a +// FromList.Path, populates a slice of labels, and returns the FromList, +// labels, and -1. +// +// Other members of the FromList are left as zero values. +// Use FromList.RecalcLen and FromList.RecalcLeaves as needed. +func (g LabeledDirected) FromListLabels() (*FromList, []LI, NI) { + labels := make([]LI, len(g.LabeledAdjacencyList)) + paths := make([]PathEnd, len(g.LabeledAdjacencyList)) + for i := range paths { + paths[i].From = -1 + } + for fr, to := range g.LabeledAdjacencyList { + for _, to := range to { + if paths[to.To].From >= 0 { + return nil, nil, to.To + } + paths[to.To].From = NI(fr) + labels[to.To] = to.Label + } + } + return &FromList{Paths: paths}, labels, -1 +} + +// Transpose constructs a new adjacency list that is the transpose of g. +// +// For every arc from->to of g, the result will have an arc to->from. +// Transpose also counts arcs as it traverses and returns ma the number of +// arcs in g (equal to the number of arcs in the result.) +func (g LabeledDirected) Transpose() (t LabeledDirected, ma int) { + ta := make(LabeledAdjacencyList, len(g.LabeledAdjacencyList)) + for n, nbs := range g.LabeledAdjacencyList { + for _, nb := range nbs { + ta[nb.To] = append(ta[nb.To], Half{To: NI(n), Label: nb.Label}) + ma++ + } + } + return LabeledDirected{ta}, ma +} + +// Undirected returns a new undirected graph derived from g, augmented as +// needed to make it undirected, with reciprocal arcs having matching labels. +func (g LabeledDirected) Undirected() LabeledUndirected { + c, _ := g.LabeledAdjacencyList.Copy() // start with a copy + // "reciprocals wanted" + rw := make(LabeledAdjacencyList, len(g.LabeledAdjacencyList)) + for fr, to := range g.LabeledAdjacencyList { + arc: // for each arc in g + for _, to := range to { + if to.To == NI(fr) { + continue // arc is a loop + } + // search wanted arcs + wf := rw[fr] + for i, w := range wf { + if w == to { // found, remove + last := len(wf) - 1 + wf[i] = wf[last] + rw[fr] = wf[:last] + continue arc + } + } + // arc not found, add to reciprocal to wanted list + rw[to.To] = append(rw[to.To], Half{To: NI(fr), Label: to.Label}) + } + } + // add missing reciprocals + for fr, to := range rw { + c[fr] = append(c[fr], to...) + } + return LabeledUndirected{c} +} + +// Unlabeled constructs the unlabeled directed graph corresponding to g. +func (g LabeledDirected) Unlabeled() Directed { + return Directed{g.LabeledAdjacencyList.Unlabeled()} +} + +// UnlabeledTranspose constructs a new adjacency list that is the unlabeled +// transpose of g. +// +// For every arc from->to of g, the result will have an arc to->from. +// Transpose also counts arcs as it traverses and returns ma, the number of +// arcs in g (equal to the number of arcs in the result.) +// +// It is equivalent to g.Unlabeled().Transpose() but constructs the result +// directly. 
+func (g LabeledDirected) UnlabeledTranspose() (t Directed, ma int) { + ta := make(AdjacencyList, len(g.LabeledAdjacencyList)) + for n, nbs := range g.LabeledAdjacencyList { + for _, nb := range nbs { + ta[nb.To] = append(ta[nb.To], NI(n)) + ma++ + } + } + return Directed{ta}, ma +} diff --git a/vendor/github.com/soniakeys/graph/dir_RO.go b/vendor/github.com/soniakeys/graph/dir_RO.go new file mode 100644 index 00000000..77558a96 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/dir_RO.go @@ -0,0 +1,395 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// dir_RO.go is code generated from dir_cg.go by directives in graph.go. +// Editing dir_cg.go is okay. It is the code generation source. +// DO NOT EDIT dir_RO.go. +// The RO means read only and it is upper case RO to slow you down a bit +// in case you start to edit the file. + +// Balanced returns true if for every node in g, in-degree equals out-degree. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) Balanced() bool { + for n, in := range g.InDegree() { + if in != len(g.AdjacencyList[n]) { + return false + } + } + return true +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) Copy() (c Directed, ma int) { + l, s := g.AdjacencyList.Copy() + return Directed{l}, s +} + +// Cyclic determines if g contains a cycle, a non-empty path from a node +// back to itself. +// +// Cyclic returns true if g contains at least one cycle. It also returns +// an example of an arc involved in a cycle. +// Cyclic returns false if g is acyclic. +// +// Also see Topological, which detects cycles. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) Cyclic() (cyclic bool, fr NI, to NI) { + a := g.AdjacencyList + fr, to = -1, -1 + var temp, perm Bits + var df func(NI) + df = func(n NI) { + switch { + case temp.Bit(n) == 1: + cyclic = true + return + case perm.Bit(n) == 1: + return + } + temp.SetBit(n, 1) + for _, nb := range a[n] { + df(nb) + if cyclic { + if fr < 0 { + fr, to = n, nb + } + return + } + } + temp.SetBit(n, 0) + perm.SetBit(n, 1) + } + for n := range a { + if perm.Bit(NI(n)) == 1 { + continue + } + if df(NI(n)); cyclic { // short circuit as soon as a cycle is found + break + } + } + return +} + +// FromList transposes a labeled graph into a FromList. +// +// Receiver g should be connected as a tree or forest. Specifically no node +// can have multiple incoming arcs. If any node n in g has multiple incoming +// arcs, the method returns (nil, n) where n is a node with multiple +// incoming arcs. +// +// Otherwise (normally) the method populates the From members in a +// FromList.Path and returns the FromList and -1. +// +// Other members of the FromList are left as zero values. +// Use FromList.RecalcLen and FromList.RecalcLeaves as needed. +// +// Unusual cases are parallel arcs and loops. A parallel arc represents +// a case of multiple arcs going to some node and so will lead to a (nil, n) +// return, even though a graph might be considered a multigraph tree. +// A single loop on a node that would otherwise be a root node, though, +// is not a case of multiple incoming arcs and so does not force a (nil, n) +// result. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g Directed) FromList() (*FromList, NI) { + paths := make([]PathEnd, len(g.AdjacencyList)) + for i := range paths { + paths[i].From = -1 + } + for fr, to := range g.AdjacencyList { + for _, to := range to { + if paths[to].From >= 0 { + return nil, to + } + paths[to].From = NI(fr) + } + } + return &FromList{Paths: paths}, -1 +} + +// InDegree computes the in-degree of each node in g +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) InDegree() []int { + ind := make([]int, len(g.AdjacencyList)) + for _, nbs := range g.AdjacencyList { + for _, nb := range nbs { + ind[nb]++ + } + } + return ind +} + +// IsTree identifies trees in directed graphs. +// +// Return value isTree is true if the subgraph reachable from root is a tree. +// Further, return value allTree is true if the entire graph g is reachable +// from root. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) IsTree(root NI) (isTree, allTree bool) { + a := g.AdjacencyList + var v Bits + v.SetAll(len(a)) + var df func(NI) bool + df = func(n NI) bool { + if v.Bit(n) == 0 { + return false + } + v.SetBit(n, 0) + for _, to := range a[n] { + if !df(to) { + return false + } + } + return true + } + isTree = df(root) + return isTree, isTree && v.Zero() +} + +// Tarjan identifies strongly connected components in a directed graph using +// Tarjan's algorithm. +// +// The method calls the emit argument for each component identified. Each +// component is a list of nodes. A property of the algorithm is that +// components are emitted in reverse topological order of the condensation. +// (See https://en.wikipedia.org/wiki/Strongly_connected_component#Definitions +// for description of condensation.) +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also TarjanForward and TarjanCondensation. +func (g Directed) Tarjan(emit func([]NI) bool) { + // See "Depth-first search and linear graph algorithms", Robert Tarjan, + // SIAM J. Comput. Vol. 1, No. 2, June 1972. + // + // Implementation here from Wikipedia pseudocode, + // http://en.wikipedia.org/w/index.php?title=Tarjan%27s_strongly_connected_components_algorithm&direction=prev&oldid=647184742 + var indexed, stacked Bits + a := g.AdjacencyList + index := make([]int, len(a)) + lowlink := make([]int, len(a)) + x := 0 + var S []NI + var sc func(NI) bool + sc = func(n NI) bool { + index[n] = x + indexed.SetBit(n, 1) + lowlink[n] = x + x++ + S = append(S, n) + stacked.SetBit(n, 1) + for _, nb := range a[n] { + if indexed.Bit(nb) == 0 { + if !sc(nb) { + return false + } + if lowlink[nb] < lowlink[n] { + lowlink[n] = lowlink[nb] + } + } else if stacked.Bit(nb) == 1 { + if index[nb] < lowlink[n] { + lowlink[n] = index[nb] + } + } + } + if lowlink[n] == index[n] { + var c []NI + for { + last := len(S) - 1 + w := S[last] + S = S[:last] + stacked.SetBit(w, 0) + c = append(c, w) + if w == n { + if !emit(c) { + return false + } + break + } + } + } + return true + } + for n := range a { + if indexed.Bit(NI(n)) == 0 && !sc(NI(n)) { + return + } + } +} + +// TarjanForward returns strongly connected components. +// +// It returns components in the reverse order of Tarjan, for situations +// where a forward topological ordering is easier. 
+func (g Directed) TarjanForward() [][]NI { + var r [][]NI + g.Tarjan(func(c []NI) bool { + r = append(r, c) + return true + }) + scc := make([][]NI, len(r)) + last := len(r) - 1 + for i, ci := range r { + scc[last-i] = ci + } + return scc +} + +// TarjanCondensation returns strongly connected components and their +// condensation graph. +// +// Components are ordered in a forward topological ordering. +func (g Directed) TarjanCondensation() (scc [][]NI, cd AdjacencyList) { + scc = g.TarjanForward() + cd = make(AdjacencyList, len(scc)) // return value + cond := make([]NI, len(g.AdjacencyList)) // mapping from g node to cd node + for cn := NI(len(scc) - 1); cn >= 0; cn-- { + c := scc[cn] + for _, n := range c { + cond[n] = NI(cn) // map g node to cd node + } + var tos []NI // list of 'to' nodes + var m Bits // tos map + m.SetBit(cn, 1) + for _, n := range c { + for _, to := range g.AdjacencyList[n] { + if ct := cond[to]; m.Bit(ct) == 0 { + m.SetBit(ct, 1) + tos = append(tos, ct) + } + } + } + cd[cn] = tos + } + return +} + +// Topological computes a topological ordering of a directed acyclic graph. +// +// For an acyclic graph, return value ordering is a permutation of node numbers +// in topologically sorted order and cycle will be nil. If the graph is found +// to be cyclic, ordering will be nil and cycle will be the path of a found +// cycle. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) Topological() (ordering, cycle []NI) { + a := g.AdjacencyList + ordering = make([]NI, len(a)) + i := len(ordering) + var temp, perm Bits + var cycleFound bool + var cycleStart NI + var df func(NI) + df = func(n NI) { + switch { + case temp.Bit(n) == 1: + cycleFound = true + cycleStart = n + return + case perm.Bit(n) == 1: + return + } + temp.SetBit(n, 1) + for _, nb := range a[n] { + df(nb) + if cycleFound { + if cycleStart >= 0 { + // a little hack: orderng won't be needed so repurpose the + // slice as cycle. this is read out in reverse order + // as the recursion unwinds. + x := len(ordering) - 1 - len(cycle) + ordering[x] = n + cycle = ordering[x:] + if n == cycleStart { + cycleStart = -1 + } + } + return + } + } + temp.SetBit(n, 0) + perm.SetBit(n, 1) + i-- + ordering[i] = n + } + for n := range a { + if perm.Bit(NI(n)) == 1 { + continue + } + df(NI(n)) + if cycleFound { + return nil, cycle + } + } + return ordering, nil +} + +// TopologicalKahn computes a topological ordering of a directed acyclic graph. +// +// For an acyclic graph, return value ordering is a permutation of node numbers +// in topologically sorted order and cycle will be nil. If the graph is found +// to be cyclic, ordering will be nil and cycle will be the path of a found +// cycle. +// +// This function is based on the algorithm by Arthur Kahn and requires the +// transpose of g be passed as the argument. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) TopologicalKahn(tr Directed) (ordering, cycle []NI) { + // code follows Wikipedia pseudocode. + var L, S []NI + // rem for "remaining edges," this function makes a local copy of the + // in-degrees and consumes that instead of consuming an input. 
+ rem := make([]int, len(g.AdjacencyList)) + for n, fr := range tr.AdjacencyList { + if len(fr) == 0 { + // accumulate "set of all nodes with no incoming edges" + S = append(S, NI(n)) + } else { + // initialize rem from in-degree + rem[n] = len(fr) + } + } + for len(S) > 0 { + last := len(S) - 1 // "remove a node n from S" + n := S[last] + S = S[:last] + L = append(L, n) // "add n to tail of L" + for _, m := range g.AdjacencyList[n] { + // WP pseudo code reads "for each node m..." but it means for each + // node m *remaining in the graph.* We consume rem rather than + // the graph, so "remaining in the graph" for us means rem[m] > 0. + if rem[m] > 0 { + rem[m]-- // "remove edge from the graph" + if rem[m] == 0 { // if "m has no other incoming edges" + S = append(S, m) // "insert m into S" + } + } + } + } + // "If graph has edges," for us means a value in rem is > 0. + for c, in := range rem { + if in > 0 { + // recover cyclic nodes + for _, nb := range g.AdjacencyList[c] { + if rem[nb] > 0 { + cycle = append(cycle, NI(c)) + break + } + } + } + } + if len(cycle) > 0 { + return nil, cycle + } + return L, nil +} diff --git a/vendor/github.com/soniakeys/graph/dir_cg.go b/vendor/github.com/soniakeys/graph/dir_cg.go new file mode 100644 index 00000000..2b82f4f1 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/dir_cg.go @@ -0,0 +1,395 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// dir_RO.go is code generated from dir_cg.go by directives in graph.go. +// Editing dir_cg.go is okay. It is the code generation source. +// DO NOT EDIT dir_RO.go. +// The RO means read only and it is upper case RO to slow you down a bit +// in case you start to edit the file. + +// Balanced returns true if for every node in g, in-degree equals out-degree. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) Balanced() bool { + for n, in := range g.InDegree() { + if in != len(g.LabeledAdjacencyList[n]) { + return false + } + } + return true +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) Copy() (c LabeledDirected, ma int) { + l, s := g.LabeledAdjacencyList.Copy() + return LabeledDirected{l}, s +} + +// Cyclic determines if g contains a cycle, a non-empty path from a node +// back to itself. +// +// Cyclic returns true if g contains at least one cycle. It also returns +// an example of an arc involved in a cycle. +// Cyclic returns false if g is acyclic. +// +// Also see Topological, which detects cycles. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) Cyclic() (cyclic bool, fr NI, to Half) { + a := g.LabeledAdjacencyList + fr, to.To = -1, -1 + var temp, perm Bits + var df func(NI) + df = func(n NI) { + switch { + case temp.Bit(n) == 1: + cyclic = true + return + case perm.Bit(n) == 1: + return + } + temp.SetBit(n, 1) + for _, nb := range a[n] { + df(nb.To) + if cyclic { + if fr < 0 { + fr, to = n, nb + } + return + } + } + temp.SetBit(n, 0) + perm.SetBit(n, 1) + } + for n := range a { + if perm.Bit(NI(n)) == 1 { + continue + } + if df(NI(n)); cyclic { // short circuit as soon as a cycle is found + break + } + } + return +} + +// FromList transposes a labeled graph into a FromList. +// +// Receiver g should be connected as a tree or forest. Specifically no node +// can have multiple incoming arcs. 
If any node n in g has multiple incoming +// arcs, the method returns (nil, n) where n is a node with multiple +// incoming arcs. +// +// Otherwise (normally) the method populates the From members in a +// FromList.Path and returns the FromList and -1. +// +// Other members of the FromList are left as zero values. +// Use FromList.RecalcLen and FromList.RecalcLeaves as needed. +// +// Unusual cases are parallel arcs and loops. A parallel arc represents +// a case of multiple arcs going to some node and so will lead to a (nil, n) +// return, even though a graph might be considered a multigraph tree. +// A single loop on a node that would otherwise be a root node, though, +// is not a case of multiple incoming arcs and so does not force a (nil, n) +// result. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) FromList() (*FromList, NI) { + paths := make([]PathEnd, len(g.LabeledAdjacencyList)) + for i := range paths { + paths[i].From = -1 + } + for fr, to := range g.LabeledAdjacencyList { + for _, to := range to { + if paths[to.To].From >= 0 { + return nil, to.To + } + paths[to.To].From = NI(fr) + } + } + return &FromList{Paths: paths}, -1 +} + +// InDegree computes the in-degree of each node in g +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) InDegree() []int { + ind := make([]int, len(g.LabeledAdjacencyList)) + for _, nbs := range g.LabeledAdjacencyList { + for _, nb := range nbs { + ind[nb.To]++ + } + } + return ind +} + +// IsTree identifies trees in directed graphs. +// +// Return value isTree is true if the subgraph reachable from root is a tree. +// Further, return value allTree is true if the entire graph g is reachable +// from root. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) IsTree(root NI) (isTree, allTree bool) { + a := g.LabeledAdjacencyList + var v Bits + v.SetAll(len(a)) + var df func(NI) bool + df = func(n NI) bool { + if v.Bit(n) == 0 { + return false + } + v.SetBit(n, 0) + for _, to := range a[n] { + if !df(to.To) { + return false + } + } + return true + } + isTree = df(root) + return isTree, isTree && v.Zero() +} + +// Tarjan identifies strongly connected components in a directed graph using +// Tarjan's algorithm. +// +// The method calls the emit argument for each component identified. Each +// component is a list of nodes. A property of the algorithm is that +// components are emitted in reverse topological order of the condensation. +// (See https://en.wikipedia.org/wiki/Strongly_connected_component#Definitions +// for description of condensation.) +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also TarjanForward and TarjanCondensation. +func (g LabeledDirected) Tarjan(emit func([]NI) bool) { + // See "Depth-first search and linear graph algorithms", Robert Tarjan, + // SIAM J. Comput. Vol. 1, No. 2, June 1972. 
+ // + // Implementation here from Wikipedia pseudocode, + // http://en.wikipedia.org/w/index.php?title=Tarjan%27s_strongly_connected_components_algorithm&direction=prev&oldid=647184742 + var indexed, stacked Bits + a := g.LabeledAdjacencyList + index := make([]int, len(a)) + lowlink := make([]int, len(a)) + x := 0 + var S []NI + var sc func(NI) bool + sc = func(n NI) bool { + index[n] = x + indexed.SetBit(n, 1) + lowlink[n] = x + x++ + S = append(S, n) + stacked.SetBit(n, 1) + for _, nb := range a[n] { + if indexed.Bit(nb.To) == 0 { + if !sc(nb.To) { + return false + } + if lowlink[nb.To] < lowlink[n] { + lowlink[n] = lowlink[nb.To] + } + } else if stacked.Bit(nb.To) == 1 { + if index[nb.To] < lowlink[n] { + lowlink[n] = index[nb.To] + } + } + } + if lowlink[n] == index[n] { + var c []NI + for { + last := len(S) - 1 + w := S[last] + S = S[:last] + stacked.SetBit(w, 0) + c = append(c, w) + if w == n { + if !emit(c) { + return false + } + break + } + } + } + return true + } + for n := range a { + if indexed.Bit(NI(n)) == 0 && !sc(NI(n)) { + return + } + } +} + +// TarjanForward returns strongly connected components. +// +// It returns components in the reverse order of Tarjan, for situations +// where a forward topological ordering is easier. +func (g LabeledDirected) TarjanForward() [][]NI { + var r [][]NI + g.Tarjan(func(c []NI) bool { + r = append(r, c) + return true + }) + scc := make([][]NI, len(r)) + last := len(r) - 1 + for i, ci := range r { + scc[last-i] = ci + } + return scc +} + +// TarjanCondensation returns strongly connected components and their +// condensation graph. +// +// Components are ordered in a forward topological ordering. +func (g LabeledDirected) TarjanCondensation() (scc [][]NI, cd AdjacencyList) { + scc = g.TarjanForward() + cd = make(AdjacencyList, len(scc)) // return value + cond := make([]NI, len(g.LabeledAdjacencyList)) // mapping from g node to cd node + for cn := NI(len(scc) - 1); cn >= 0; cn-- { + c := scc[cn] + for _, n := range c { + cond[n] = NI(cn) // map g node to cd node + } + var tos []NI // list of 'to' nodes + var m Bits // tos map + m.SetBit(cn, 1) + for _, n := range c { + for _, to := range g.LabeledAdjacencyList[n] { + if ct := cond[to.To]; m.Bit(ct) == 0 { + m.SetBit(ct, 1) + tos = append(tos, ct) + } + } + } + cd[cn] = tos + } + return +} + +// Topological computes a topological ordering of a directed acyclic graph. +// +// For an acyclic graph, return value ordering is a permutation of node numbers +// in topologically sorted order and cycle will be nil. If the graph is found +// to be cyclic, ordering will be nil and cycle will be the path of a found +// cycle. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) Topological() (ordering, cycle []NI) { + a := g.LabeledAdjacencyList + ordering = make([]NI, len(a)) + i := len(ordering) + var temp, perm Bits + var cycleFound bool + var cycleStart NI + var df func(NI) + df = func(n NI) { + switch { + case temp.Bit(n) == 1: + cycleFound = true + cycleStart = n + return + case perm.Bit(n) == 1: + return + } + temp.SetBit(n, 1) + for _, nb := range a[n] { + df(nb.To) + if cycleFound { + if cycleStart >= 0 { + // a little hack: orderng won't be needed so repurpose the + // slice as cycle. this is read out in reverse order + // as the recursion unwinds. 
+ x := len(ordering) - 1 - len(cycle) + ordering[x] = n + cycle = ordering[x:] + if n == cycleStart { + cycleStart = -1 + } + } + return + } + } + temp.SetBit(n, 0) + perm.SetBit(n, 1) + i-- + ordering[i] = n + } + for n := range a { + if perm.Bit(NI(n)) == 1 { + continue + } + df(NI(n)) + if cycleFound { + return nil, cycle + } + } + return ordering, nil +} + +// TopologicalKahn computes a topological ordering of a directed acyclic graph. +// +// For an acyclic graph, return value ordering is a permutation of node numbers +// in topologically sorted order and cycle will be nil. If the graph is found +// to be cyclic, ordering will be nil and cycle will be the path of a found +// cycle. +// +// This function is based on the algorithm by Arthur Kahn and requires the +// transpose of g be passed as the argument. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) TopologicalKahn(tr Directed) (ordering, cycle []NI) { + // code follows Wikipedia pseudocode. + var L, S []NI + // rem for "remaining edges," this function makes a local copy of the + // in-degrees and consumes that instead of consuming an input. + rem := make([]int, len(g.LabeledAdjacencyList)) + for n, fr := range tr.AdjacencyList { + if len(fr) == 0 { + // accumulate "set of all nodes with no incoming edges" + S = append(S, NI(n)) + } else { + // initialize rem from in-degree + rem[n] = len(fr) + } + } + for len(S) > 0 { + last := len(S) - 1 // "remove a node n from S" + n := S[last] + S = S[:last] + L = append(L, n) // "add n to tail of L" + for _, m := range g.LabeledAdjacencyList[n] { + // WP pseudo code reads "for each node m..." but it means for each + // node m *remaining in the graph.* We consume rem rather than + // the graph, so "remaining in the graph" for us means rem[m] > 0. + if rem[m.To] > 0 { + rem[m.To]-- // "remove edge from the graph" + if rem[m.To] == 0 { // if "m has no other incoming edges" + S = append(S, m.To) // "insert m into S" + } + } + } + } + // "If graph has edges," for us means a value in rem is > 0. + for c, in := range rem { + if in > 0 { + // recover cyclic nodes + for _, nb := range g.LabeledAdjacencyList[c] { + if rem[nb.To] > 0 { + cycle = append(cycle, NI(c)) + break + } + } + } + } + if len(cycle) > 0 { + return nil, cycle + } + return L, nil +} diff --git a/vendor/github.com/soniakeys/graph/doc.go b/vendor/github.com/soniakeys/graph/doc.go new file mode 100644 index 00000000..6d072789 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/doc.go @@ -0,0 +1,128 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +// Graph algorithms: Dijkstra, A*, Bellman Ford, Floyd Warshall; +// Kruskal and Prim minimal spanning tree; topological sort and DAG longest +// and shortest paths; Eulerian cycle and path; degeneracy and k-cores; +// Bron Kerbosch clique finding; connected components; and others. +// +// This is a graph library of integer indexes. To use it with application +// data, you associate data with integer indexes, perform searches or other +// operations with the library, and then use the integer index results to refer +// back to your application data. +// +// Thus it does not store application data, pointers to application data, +// or require you to implement an interface on your application data. +// The idea is to keep the library methods fast and lean. +// +// Representation overview +// +// The package defines a type for a node index (NI) which is just an integer +// type. 
It defines types for a number of number graph representations using +// NI. The fundamental graph type is AdjacencyList, which is the +// common "list of lists" graph representation. It is a list as a slice +// with one element for each node of the graph. Each element is a list +// itself, a list of neighbor nodes, implemented as an NI slice. Methods +// on an AdjacencyList generally work on any representable graph, including +// directed or undirected graphs, simple graphs or multigraphs. +// +// The type Undirected embeds an AdjacencyList adding methods specific to +// undirected graphs. Similarly the type Directed adds methods meaningful +// for directed graphs. +// +// Similar to NI, the type LI is a "label index" which labels a +// node-to-neighbor "arc" or edge. Just as an NI can index arbitrary node +// data, an LI can index arbitrary arc or edge data. A number of algorithms +// use a "weight" associated with an arc. This package does not represent +// weighted arcs explicitly, but instead uses the LI as a more general +// mechanism allowing not only weights but arbitrary data to be associated +// with arcs. While AdjacencyList represents an arc with simply an NI, +// the type LabeledAdjacencyList uses a type that pairs an NI with an LI. +// This type is named Half, for half-arc. (A full arc would represent +// both ends.) Types LabeledDirected and LabeledUndirected embed a +// LabeledAdjacencyList. +// +// In contrast to Half, the type Edge represents both ends of an edge (but +// no label.) The type LabeledEdge adds the label. The type WeightedEdgeList +// bundles a list of LabeledEdges with a WeightFunc. WeightedEdgeList is +// currently only used by Kruskal methods. +// +// FromList is a compact rooted tree (or forest) respresentation. Like +// AdjacencyList and LabeledAdjacencyList, it is a list with one element for +// each node of the graph. Each element contains only a single neighbor +// however, its parent in the tree, the "from" node. +// +// Code generation +// +// A number of methods on AdjacencyList, Directed, and Undirected are +// applicable to LabeledAdjacencyList, LabeledDirected, and LabeledUndirected +// simply by ignoring the label. In these cases code generation provides +// methods on both types from a single source implementation. These methods +// are documented with the sentence "There are equivalent labeled and unlabeled +// versions of this method" and examples are provided only for the unlabeled +// version. +// +// Terminology +// +// This package uses the term "node" rather than "vertex." It uses "arc" +// to mean a directed edge, and uses "from" and "to" to refer to the ends +// of an arc. It uses "start" and "end" to refer to endpoints of a search +// or traversal. +// +// The usage of "to" and "from" is perhaps most strange. In common speech +// they are prepositions, but throughout this package they are used as +// adjectives, for example to refer to the "from node" of an arc or the +// "to node". The type "FromList" is named to indicate it stores a list of +// "from" values. +// +// A "half arc" refers to just one end of an arc, either the to or from end. +// +// Two arcs are "reciprocal" if they connect two distinct nodes n1 and n2, +// one arc leading from n1 to n2 and the other arc leading from n2 to n1. +// Undirected graphs are represented with reciprocal arcs. +// +// A node with an arc to itself represents a "loop." Duplicate arcs, where +// a node has multiple arcs to another node, are termed "parallel arcs." 
+// A graph with no loops or parallel arcs is "simple." A graph that allows +// parallel arcs is a "multigraph" +// +// The "size" of a graph traditionally means the number of undirected edges. +// This package uses "arc size" to mean the number of arcs in a graph. For an +// undirected graph without loops, arc size is 2 * size. +// +// The "order" of a graph is the number of nodes. An "ordering" though means +// an ordered list of nodes. +// +// A number of graph search algorithms use a concept of arc "weights." +// The sum of arc weights along a path is a "distance." In contrast, the +// number of nodes in a path, including start and end nodes, is the path's +// "length." (Yes, mixing weights and lengths would be nonsense physically, +// but the terms used here are just distinct terms for abstract values. +// The actual meaning to an application is likely to be something else +// entirely and is not relevant within this package.) +// +// Finally, this package documentation takes back the word "object" in some +// places to refer to a Go value, especially a value of a type with methods. +// +// Shortest path searches +// +// This package implements a number of shortest path searches. Most work +// with weighted graphs that are directed or undirected, and with graphs +// that may have loops or parallel arcs. For weighted graphs, "shortest" +// is defined as the path distance (sum of arc weights) with path length +// (number of nodes) breaking ties. If multiple paths have the same minimum +// distance with the same minimum length, search methods are free to return +// any of them. +// +// Type name Description, methods +// BreadthFirst Unweigted arcs, traversal, single path search or all paths. +// BreadthFirst2 Direction-optimizing variant of BreadthFirst. +// Dijkstra Non-negative arc weights, single or all paths. +// AStar Non-negative arc weights, heuristic guided, single path. +// BellmanFord Negative arc weights allowed, no negative cycles, all paths. +// DAGPath O(n) algorithm for DAGs, arc weights of any sign. +// FloydWarshall all pairs distances, no negative cycles. +// +// These searches typically have one method that is full-featured and +// then a convenience method with a simpler API targeting a simpler use case. +package graph diff --git a/vendor/github.com/soniakeys/graph/fromlist.go b/vendor/github.com/soniakeys/graph/fromlist.go new file mode 100644 index 00000000..31d41fa1 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/fromlist.go @@ -0,0 +1,418 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// FromList represents a rooted tree (or forest) where each node is associated +// with a half arc identifying an arc "from" another node. +// +// Other terms for this data structure include "parent list", +// "predecessor list", "in-tree", "inverse arborescence", and +// "spaghetti stack." +// +// The Paths member represents the tree structure. Leaves and MaxLen are +// not always needed. Where Leaves is used it serves as a bitmap where +// Leaves.Bit(n) == 1 for each leaf n of the tree. Where MaxLen is used it is +// provided primarily as a convenience for functions that might want to +// anticipate the maximum path length that would be encountered traversing +// the tree. +// +// Various graph search methods use a FromList to returns search results. +// For a start node of a search, From will be -1 and Len will be 1. 
For other +// nodes reached by the search, From represents a half arc in a path back to +// start and Len represents the number of nodes in the path. For nodes not +// reached by the search, From will be -1 and Len will be 0. +// +// A single FromList can also represent a forest. In this case paths from +// all leaves do not return to a single root node, but multiple root nodes. +// +// While a FromList generally encodes a tree or forest, it is technically +// possible to encode a cyclic graph. A number of FromList methods require +// the receiver to be acyclic. Graph methods documented to return a tree or +// forest will never return a cyclic FromList. In other cases however, +// where a FromList is not known to by cyclic, the Cyclic method can be +// useful to validate the acyclic property. +type FromList struct { + Paths []PathEnd // tree representation + Leaves Bits // leaves of tree + MaxLen int // length of longest path, max of all PathEnd.Len values +} + +// PathEnd associates a half arc and a path length. +// +// A PathEnd list is an element type of FromList. +type PathEnd struct { + From NI // a "from" half arc, the node the arc comes from + Len int // number of nodes in path from start +} + +// NewFromList creates a FromList object of given order. +// +// The Paths member is allocated to length n but there is no other +// initialization. +func NewFromList(n int) FromList { + return FromList{Paths: make([]PathEnd, n)} +} + +// BoundsOk validates the "from" values in the list. +// +// Negative values are allowed as they indicate root nodes. +// +// BoundsOk returns true when all from values are less than len(t). +// Otherwise it returns false and a node with a from value >= len(t). +func (f FromList) BoundsOk() (ok bool, n NI) { + for n, e := range f.Paths { + if int(e.From) >= len(f.Paths) { + return false, NI(n) + } + } + return true, -1 +} + +// CommonStart returns the common start node of minimal paths to a and b. +// +// It returns -1 if a and b cannot be traced back to a common node. +// +// The method relies on populated PathEnd.Len members. Use RecalcLen if +// the Len members are not known to be present and correct. +func (f FromList) CommonStart(a, b NI) NI { + p := f.Paths + if p[a].Len < p[b].Len { + a, b = b, a + } + for bl := p[b].Len; p[a].Len > bl; { + a = p[a].From + if a < 0 { + return -1 + } + } + for a != b { + a = p[a].From + if a < 0 { + return -1 + } + b = p[b].From + } + return a +} + +// Cyclic determines if f contains a cycle, a non-empty path from a node +// back to itself. +// +// Cyclic returns true if g contains at least one cycle. It also returns +// an example of a node involved in a cycle. +// +// Cyclic returns (false, -1) in the normal case where f is acyclic. +// Note that the bool is not an "ok" return. A cyclic FromList is usually +// not okay. +func (f FromList) Cyclic() (cyclic bool, n NI) { + var vis Bits + p := f.Paths + for i := range p { + var path Bits + for n := NI(i); vis.Bit(n) == 0; { + vis.SetBit(n, 1) + path.SetBit(n, 1) + if n = p[n].From; n < 0 { + break + } + if path.Bit(n) == 1 { + return true, n + } + } + } + return false, -1 +} + +// IsolatedNodeBits returns a bitmap of isolated nodes in receiver graph f. +// +// An isolated node is one with no arcs going to or from it. +func (f FromList) IsolatedNodes() (iso Bits) { + p := f.Paths + iso.SetAll(len(p)) + for n, e := range p { + if e.From >= 0 { + iso.SetBit(NI(n), 0) + iso.SetBit(e.From, 0) + } + } + return +} + +// PathTo decodes a FromList, recovering a single path. 
+// +// The path is returned as a list of nodes where the first element will be +// a root node and the last element will be the specified end node. +// +// Only the Paths member of the receiver is used. Other members of the +// FromList do not need to be valid, however the MaxLen member can be useful +// for allocating argument p. +// +// Argument p can provide the result slice. If p has capacity for the result +// it will be used, otherwise a new slice is created for the result. +// +// See also function PathTo. +func (f FromList) PathTo(end NI, p []NI) []NI { + return PathTo(f.Paths, end, p) +} + +// PathTo decodes a single path from a PathEnd list. +// +// A PathEnd list is the main data representation in a FromList. See FromList. +// +// PathTo returns a list of nodes where the first element will be +// a root node and the last element will be the specified end node. +// +// Argument p can provide the result slice. If p has capacity for the result +// it will be used, otherwise a new slice is created for the result. +// +// See also method FromList.PathTo. +func PathTo(paths []PathEnd, end NI, p []NI) []NI { + n := paths[end].Len + if n == 0 { + return nil + } + if cap(p) >= n { + p = p[:n] + } else { + p = make([]NI, n) + } + for { + n-- + p[n] = end + if n == 0 { + return p + } + end = paths[end].From + } +} + +// Preorder traverses f calling Visitor v in preorder. +// +// Nodes are visited in order such that for any node n with from node fr, +// fr is visited before n. Where f represents a tree, the visit ordering +// corresponds to a preordering, or depth first traversal of the tree. +// Where f represents a forest, the preorderings of the trees can be +// intermingled. +// +// Leaves must be set correctly first. Use RecalcLeaves if leaves are not +// known to be set correctly. FromList f cannot be cyclic. +// +// Traversal continues while v returns true. It terminates if v returns false. +// Preorder returns true if it completes without v returning false. Preorder +// returns false if traversal is terminated by v returning false. +func (f FromList) Preorder(v OkNodeVisitor) bool { + p := f.Paths + var done Bits + var df func(NI) bool + df = func(n NI) bool { + done.SetBit(n, 1) + if fr := p[n].From; fr >= 0 && done.Bit(fr) == 0 { + df(fr) + } + return v(n) + } + for n := range f.Paths { + p[n].Len = 0 + } + return f.Leaves.Iterate(func(n NI) bool { + return df(n) + }) +} + +// RecalcLeaves recomputes the Leaves member of f. +func (f *FromList) RecalcLeaves() { + p := f.Paths + lv := &f.Leaves + lv.SetAll(len(p)) + for n := range f.Paths { + if fr := p[n].From; fr >= 0 { + lv.SetBit(fr, 0) + } + } +} + +// RecalcLen recomputes Len for each path end, and recomputes MaxLen. +// +// RecalcLen relies on the Leaves member being valid. If it is not known +// to be valid, call RecalcLeaves before calling RecalcLen. +func (f *FromList) RecalcLen() { + p := f.Paths + var setLen func(NI) int + setLen = func(n NI) int { + switch { + case p[n].Len > 0: + return p[n].Len + case p[n].From < 0: + p[n].Len = 1 + return 1 + } + l := 1 + setLen(p[n].From) + p[n].Len = l + return l + } + for n := range f.Paths { + p[n].Len = 0 + } + f.MaxLen = 0 + f.Leaves.Iterate(func(n NI) bool { + if l := setLen(NI(n)); l > f.MaxLen { + f.MaxLen = l + } + return true + }) +} + +// ReRoot reorients the tree containing n to make n the root node. +// +// It keeps the tree connected by "reversing" the path from n to the old root. +// +// After ReRoot, the Leaves and Len members are invalid. 
+// Call RecalcLeaves or RecalcLen as needed. +func (f *FromList) ReRoot(n NI) { + p := f.Paths + fr := p[n].From + if fr < 0 { + return + } + p[n].From = -1 + for { + ff := p[fr].From + p[fr].From = n + if ff < 0 { + return + } + n = fr + fr = ff + } +} + +// Root finds the root of a node in a FromList. +func (f FromList) Root(n NI) NI { + for p := f.Paths; ; { + fr := p[n].From + if fr < 0 { + return n + } + n = fr + } +} + +// Transpose constructs the directed graph corresponding to FromList f +// but with arcs in the opposite direction. That is, from roots toward leaves. +// +// The method relies only on the From member of f.Paths. Other members of +// the FromList are not used. +// +// See FromList.TransposeRoots for a version that also accumulates and returns +// information about the roots. +func (f FromList) Transpose() Directed { + g := make(AdjacencyList, len(f.Paths)) + for n, p := range f.Paths { + if p.From == -1 { + continue + } + g[p.From] = append(g[p.From], NI(n)) + } + return Directed{g} +} + +// TransposeLabeled constructs the directed labeled graph corresponding +// to FromList f but with arcs in the opposite direction. That is, from +// roots toward leaves. +// +// The argument labels can be nil. In this case labels are generated matching +// the path indexes. This corresponds to the "to", or child node. +// +// If labels is non-nil, it must be the same length as f.Paths and is used +// to look up label numbers by the path index. +// +// The method relies only on the From member of f.Paths. Other members of +// the FromList are not used. +// +// See FromList.TransposeLabeledRoots for a version that also accumulates +// and returns information about the roots. +func (f FromList) TransposeLabeled(labels []LI) LabeledDirected { + g := make(LabeledAdjacencyList, len(f.Paths)) + for n, p := range f.Paths { + if p.From == -1 { + continue + } + l := LI(n) + if labels != nil { + l = labels[n] + } + g[p.From] = append(g[p.From], Half{NI(n), l}) + } + return LabeledDirected{g} +} + +// TransposeLabeledRoots constructs the labeled directed graph corresponding +// to FromList f but with arcs in the opposite direction. That is, from +// roots toward leaves. +// +// TransposeLabeledRoots also returns a count of roots of the resulting forest +// and a bitmap of the roots. +// +// The argument labels can be nil. In this case labels are generated matching +// the path indexes. This corresponds to the "to", or child node. +// +// If labels is non-nil, it must be the same length as t.Paths and is used +// to look up label numbers by the path index. +// +// The method relies only on the From member of f.Paths. Other members of +// the FromList are not used. +// +// See FromList.TransposeLabeled for a simpler verstion that returns the +// forest only. +func (f FromList) TransposeLabeledRoots(labels []LI) (forest LabeledDirected, nRoots int, roots Bits) { + p := f.Paths + nRoots = len(p) + roots.SetAll(len(p)) + g := make(LabeledAdjacencyList, len(p)) + for i, p := range f.Paths { + if p.From == -1 { + continue + } + l := LI(i) + if labels != nil { + l = labels[i] + } + n := NI(i) + g[p.From] = append(g[p.From], Half{n, l}) + if roots.Bit(n) == 1 { + roots.SetBit(n, 0) + nRoots-- + } + } + return LabeledDirected{g}, nRoots, roots +} + +// TransposeRoots constructs the directed graph corresponding to FromList f +// but with arcs in the opposite direction. That is, from roots toward leaves. 
+// +// TransposeRoots also returns a count of roots of the resulting forest and +// a bitmap of the roots. +// +// The method relies only on the From member of f.Paths. Other members of +// the FromList are not used. +// +// See FromList.Transpose for a simpler verstion that returns the forest only. +func (f FromList) TransposeRoots() (forest Directed, nRoots int, roots Bits) { + p := f.Paths + nRoots = len(p) + roots.SetAll(len(p)) + g := make(AdjacencyList, len(p)) + for i, e := range p { + if e.From == -1 { + continue + } + n := NI(i) + g[e.From] = append(g[e.From], n) + if roots.Bit(n) == 1 { + roots.SetBit(n, 0) + nRoots-- + } + } + return Directed{g}, nRoots, roots +} diff --git a/vendor/github.com/soniakeys/graph/graph.go b/vendor/github.com/soniakeys/graph/graph.go new file mode 100644 index 00000000..a2044e9a --- /dev/null +++ b/vendor/github.com/soniakeys/graph/graph.go @@ -0,0 +1,181 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// graph.go contains type definitions for all graph types and components. +// Also, go generate directives for source transformations. +// +// For readability, the types are defined in a dependency order: +// +// NI +// NodeList +// AdjacencyList +// Directed +// Undirected +// LI +// Half +// LabeledAdjacencyList +// LabeledDirected +// LabeledUndirected +// Edge +// LabeledEdge +// WeightFunc +// WeightedEdgeList + +//go:generate cp adj_cg.go adj_RO.go +//go:generate gofmt -r "LabeledAdjacencyList -> AdjacencyList" -w adj_RO.go +//go:generate gofmt -r "n.To -> n" -w adj_RO.go +//go:generate gofmt -r "Half -> NI" -w adj_RO.go + +//go:generate cp dir_cg.go dir_RO.go +//go:generate gofmt -r "LabeledDirected -> Directed" -w dir_RO.go +//go:generate gofmt -r "LabeledAdjacencyList -> AdjacencyList" -w dir_RO.go +//go:generate gofmt -r "n.To -> n" -w dir_RO.go +//go:generate gofmt -r "Half -> NI" -w dir_RO.go + +//go:generate cp undir_cg.go undir_RO.go +//go:generate gofmt -r "LabeledUndirected -> Undirected" -w undir_RO.go +//go:generate gofmt -r "LabeledAdjacencyList -> AdjacencyList" -w undir_RO.go +//go:generate gofmt -r "n.To -> n" -w undir_RO.go +//go:generate gofmt -r "Half -> NI" -w undir_RO.go + +// NI is a "node int" +// +// It is a node number or node ID. NIs are used extensively as slice indexes. +// NIs typically account for a significant fraction of the memory footprint of +// a graph. +type NI int32 + +// NodeList satisfies sort.Interface. +type NodeList []NI + +func (l NodeList) Len() int { return len(l) } +func (l NodeList) Less(i, j int) bool { return l[i] < l[j] } +func (l NodeList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } + +// An AdjacencyList represents a graph as a list of neighbors for each node. +// The "node ID" of a node is simply it's slice index in the AdjacencyList. +// For an AdjacencyList g, g[n] represents arcs going from node n to nodes +// g[n]. +// +// Adjacency lists are inherently directed but can be used to represent +// directed or undirected graphs. See types Directed and Undirected. +type AdjacencyList [][]NI + +// Directed represents a directed graph. +// +// Directed methods generally rely on the graph being directed, specifically +// that arcs do not have reciprocals. +type Directed struct { + AdjacencyList // embedded to include AdjacencyList methods +} + +// Undirected represents an undirected graph. +// +// In an undirected graph, for each arc between distinct nodes there is also +// a reciprocal arc, an arc in the opposite direction. 
Loops do not have +// reciprocals. +// +// Undirected methods generally rely on the graph being undirected, +// specifically that every arc between distinct nodes has a reciprocal. +type Undirected struct { + AdjacencyList // embedded to include AdjacencyList methods +} + +// LI is a label integer, used for associating labels with arcs. +type LI int32 + +// Half is a half arc, representing a labeled arc and the "neighbor" node +// that the arc leads to. +// +// Halfs can be composed to form a labeled adjacency list. +type Half struct { + To NI // node ID, usable as a slice index + Label LI // half-arc ID for application data, often a weight +} + +// A LabeledAdjacencyList represents a graph as a list of neighbors for each +// node, connected by labeled arcs. +// +// Arc labels are not necessarily unique arc IDs. Different arcs can have +// the same label. +// +// Arc labels are commonly used to assocate a weight with an arc. Arc labels +// are general purpose however and can be used to associate arbitrary +// information with an arc. +// +// Methods implementing weighted graph algorithms will commonly take a +// weight function that turns a label int into a float64 weight. +// +// If only a small amount of information -- such as an integer weight or +// a single printable character -- needs to be associated, it can sometimes +// be possible to encode the information directly into the label int. For +// more generality, some lookup scheme will be needed. +// +// In an undirected labeled graph, reciprocal arcs must have identical labels. +// Note this does not preclude parallel arcs with different labels. +type LabeledAdjacencyList [][]Half + +// LabeledDirected represents a directed labeled graph. +// +// This is the labeled version of Directed. See types LabeledAdjacencyList +// and Directed. +type LabeledDirected struct { + LabeledAdjacencyList // embedded to include LabeledAdjacencyList methods +} + +// LabeledUndirected represents an undirected labeled graph. +// +// This is the labeled version of Undirected. See types LabeledAdjacencyList +// and Undirected. +type LabeledUndirected struct { + LabeledAdjacencyList // embedded to include LabeledAdjacencyList methods +} + +// Edge is an undirected edge between nodes N1 and N2. +type Edge struct{ N1, N2 NI } + +// LabeledEdge is an undirected edge with an associated label. +type LabeledEdge struct { + Edge + LI +} + +// WeightFunc returns a weight for a given label. +// +// WeightFunc is a parameter type for various search functions. The intent +// is to return a weight corresponding to an arc label. The name "weight" +// is an abstract term. An arc "weight" will typically have some application +// specific meaning other than physical weight. +type WeightFunc func(label LI) (weight float64) + +// WeightedEdgeList is a graph representation. +// +// It is a labeled edge list, with an associated weight function to return +// a weight given an edge label. +// +// Also associated is the order, or number of nodes of the graph. +// All nodes occurring in the edge list must be strictly less than Order. +// +// WeigtedEdgeList sorts by weight, obtained by calling the weight function. +// If weight computation is expensive, consider supplying a cached or +// memoized version. +type WeightedEdgeList struct { + Order int + WeightFunc + Edges []LabeledEdge +} + +// Len implements sort.Interface. +func (l WeightedEdgeList) Len() int { return len(l.Edges) } + +// Less implements sort.Interface. 
+func (l WeightedEdgeList) Less(i, j int) bool { + return l.WeightFunc(l.Edges[i].LI) < l.WeightFunc(l.Edges[j].LI) +} + +// Swap implements sort.Interface. +func (l WeightedEdgeList) Swap(i, j int) { + l.Edges[i], l.Edges[j] = l.Edges[j], l.Edges[i] +} diff --git a/vendor/github.com/soniakeys/graph/hacking.md b/vendor/github.com/soniakeys/graph/hacking.md new file mode 100644 index 00000000..30d2d7c5 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/hacking.md @@ -0,0 +1,37 @@ +#Hacking + +Basic use of the package is just go get, or git clone; go install. There are +no dependencies outside the standard library. + +The primary to-do list is the issue tracker on Github. I maintained a +journal on google drive for a while but at some point filed issues for all +remaining ideas in that document that still seemed relevant. So currently +there is no other roadmap or planning document. + +CI is currently on travis-ci.org. The .travis.yml builds for go 1.2.1 +following https://github.com/soniakeys/graph/issues/49, and it currently builds +for go 1.6 as well. The travis script calls a shell script right away because +I didn’t see a way to get it to do different steps for the different go +versions. For 1.2.1, I just wanted the basic tests. For a current go version +such as 1.6, there’s a growing list of checks. + +The GOARCH=386 test is for https://github.com/soniakeys/graph/issues/41. +The problem is the architecture specific code in bits32.go and bits64.go. +Yes, there are architecture independent algorithms. There is also assembly +to access machine instructions. Anyway, it’s the way it is for now. + +Im not big on making go vet happy just for a badge but I really like the +example check that I believe appeared with go 1.6. (I think it will be a +standard check with 1.7, so the test script will have to change then.) + +https://github.com/client9/misspell has been valuable. + +Also I wrote https://github.com/soniakeys/vetc to validate that each source +file has copyright/license statement. + +Then, it’s not in the ci script, but I wrote https://github.com/soniakeys/rcv +to put coverage stats in the readme. Maybe it could be commit hook or +something but for now I’ll try just running it manually now and then. + +Go fmt is not in the ci script, but I have at least one editor set up to run +it on save, so code should stay formatted pretty well. diff --git a/vendor/github.com/soniakeys/graph/mst.go b/vendor/github.com/soniakeys/graph/mst.go new file mode 100644 index 00000000..028e680c --- /dev/null +++ b/vendor/github.com/soniakeys/graph/mst.go @@ -0,0 +1,244 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +import ( + "container/heap" + "sort" +) + +type dsElement struct { + from NI + rank int +} + +type disjointSet struct { + set []dsElement +} + +func newDisjointSet(n int) disjointSet { + set := make([]dsElement, n) + for i := range set { + set[i].from = -1 + } + return disjointSet{set} +} + +// return true if disjoint trees were combined. +// false if x and y were already in the same tree. +func (ds disjointSet) union(x, y NI) bool { + xr := ds.find(x) + yr := ds.find(y) + if xr == yr { + return false + } + switch xe, ye := &ds.set[xr], &ds.set[yr]; { + case xe.rank < ye.rank: + xe.from = yr + case xe.rank == ye.rank: + xe.rank++ + fallthrough + default: + ye.from = xr + } + return true +} + +func (ds disjointSet) find(n NI) NI { + // fast paths for n == root or from root. + // no updates need in these cases. 
+ s := ds.set + fr := s[n].from + if fr < 0 { // n is root + return n + } + n, fr = fr, s[fr].from + if fr < 0 { // n is from root + return n + } + // otherwise updates needed. + // two iterative passes (rather than recursion or stack) + // pass 1: find root + r := fr + for { + f := s[r].from + if f < 0 { + break + } + r = f + } + // pass 2: update froms + for { + s[n].from = r + if fr == r { + return r + } + n = fr + fr = s[n].from + } +} + +// Kruskal implements Kruskal's algorithm for constructing a minimum spanning +// forest on an undirected graph. +// +// While the input graph is interpreted as undirected, the receiver edge list +// does not actually need to contain reciprocal arcs. A property of the +// algorithm is that arc direction is ignored. Thus only a single arc out of +// a reciprocal pair must be present in the edge list. Reciprocal arcs (and +// parallel arcs) are allowed though, and do not affect the result. +// +// The forest is returned as an undirected graph. +// +// Also returned is a total distance for the returned forest. +// +// The edge list of the receiver is sorted as a side effect of this method. +// See KruskalSorted for a version that relies on the edge list being already +// sorted. +func (l WeightedEdgeList) Kruskal() (g LabeledUndirected, dist float64) { + sort.Sort(l) + return l.KruskalSorted() +} + +// KruskalSorted implements Kruskal's algorithm for constructing a minimum +// spanning tree on an undirected graph. +// +// While the input graph is interpreted as undirected, the receiver edge list +// does not actually need to contain reciprocal arcs. A property of the +// algorithm is that arc direction is ignored. Thus only a single arc out of +// a reciprocal pair must be present in the edge list. Reciprocal arcs (and +// parallel arcs) are allowed though, and do not affect the result. +// +// When called, the edge list of the receiver must be already sorted by weight. +// See Kruskal for a version that accepts an unsorted edge list. +// +// The forest is returned as an undirected graph. +// +// Also returned is a total distance for the returned forest. +func (l WeightedEdgeList) KruskalSorted() (g LabeledUndirected, dist float64) { + ds := newDisjointSet(l.Order) + g.LabeledAdjacencyList = make(LabeledAdjacencyList, l.Order) + for _, e := range l.Edges { + if ds.union(e.N1, e.N2) { + g.AddEdge(Edge{e.N1, e.N2}, e.LI) + dist += l.WeightFunc(e.LI) + } + } + return +} + +// Prim implements the Jarník-Prim-Dijkstra algorithm for constructing +// a minimum spanning tree on an undirected graph. +// +// Prim computes a minimal spanning tree on the connected component containing +// the given start node. The tree is returned in FromList f. Argument f +// cannot be a nil pointer although it can point to a zero value FromList. +// +// If the passed FromList.Paths has the len of g though, it will be reused. +// In the case of a graph with multiple connected components, this allows a +// spanning forest to be accumulated by calling Prim successively on +// representative nodes of the components. In this case if leaves for +// individual trees are of interest, pass a non-nil zero-value for the argument +// componentLeaves and it will be populated with leaves for the single tree +// spanned by the call. +// +// If argument labels is non-nil, it must have the same length as g and will +// be populated with labels corresponding to the edges of the tree. 
+// +// Returned are the number of nodes spanned for the single tree (which will be +// the order of the connected component) and the total spanned distance for the +// single tree. +func (g LabeledUndirected) Prim(start NI, w WeightFunc, f *FromList, labels []LI, componentLeaves *Bits) (numSpanned int, dist float64) { + al := g.LabeledAdjacencyList + if len(f.Paths) != len(al) { + *f = NewFromList(len(al)) + } + b := make([]prNode, len(al)) // "best" + for n := range b { + b[n].nx = NI(n) + b[n].fx = -1 + } + rp := f.Paths + var frontier prHeap + rp[start] = PathEnd{From: -1, Len: 1} + numSpanned = 1 + fLeaves := &f.Leaves + fLeaves.SetBit(start, 1) + if componentLeaves != nil { + componentLeaves.SetBit(start, 1) + } + for a := start; ; { + for _, nb := range al[a] { + if rp[nb.To].Len > 0 { + continue // already in MST, no action + } + switch bp := &b[nb.To]; { + case bp.fx == -1: // new node for frontier + bp.from = fromHalf{From: a, Label: nb.Label} + bp.wt = w(nb.Label) + heap.Push(&frontier, bp) + case w(nb.Label) < bp.wt: // better arc + bp.from = fromHalf{From: a, Label: nb.Label} + bp.wt = w(nb.Label) + heap.Fix(&frontier, bp.fx) + } + } + if len(frontier) == 0 { + break // done + } + bp := heap.Pop(&frontier).(*prNode) + a = bp.nx + rp[a].Len = rp[bp.from.From].Len + 1 + rp[a].From = bp.from.From + if len(labels) != 0 { + labels[a] = bp.from.Label + } + dist += bp.wt + fLeaves.SetBit(bp.from.From, 0) + fLeaves.SetBit(a, 1) + if componentLeaves != nil { + componentLeaves.SetBit(bp.from.From, 0) + componentLeaves.SetBit(a, 1) + } + numSpanned++ + } + return +} + +// fromHalf is a half arc, representing a labeled arc and the "neighbor" node +// that the arc originates from. +// +// (This used to be exported when there was a LabeledFromList. Currently +// unexported now that it seems to have much more limited use.) +type fromHalf struct { + From NI + Label LI +} + +type prNode struct { + nx NI + from fromHalf + wt float64 // p.Weight(from.Label) + fx int +} + +type prHeap []*prNode + +func (h prHeap) Len() int { return len(h) } +func (h prHeap) Less(i, j int) bool { return h[i].wt < h[j].wt } +func (h prHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] + h[i].fx = i + h[j].fx = j +} +func (p *prHeap) Push(x interface{}) { + nd := x.(*prNode) + nd.fx = len(*p) + *p = append(*p, nd) +} +func (p *prHeap) Pop() interface{} { + r := *p + last := len(r) - 1 + *p = r[:last] + return r[last] +} diff --git a/vendor/github.com/soniakeys/graph/random.go b/vendor/github.com/soniakeys/graph/random.go new file mode 100644 index 00000000..99f04458 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/random.go @@ -0,0 +1,325 @@ +// Copyright 2016 Sonia Keys +// License MIT: https://opensource.org/licenses/MIT + +package graph + +import ( + "errors" + "math" + "math/rand" + "time" +) + +// Euclidean generates a random simple graph on the Euclidean plane. +// +// Nodes are associated with coordinates uniformly distributed on a unit +// square. Arcs are added between random nodes with a bias toward connecting +// nearer nodes. +// +// Unfortunately the function has a few "knobs". +// The returned graph will have order nNodes and arc size nArcs. The affinity +// argument controls the bias toward connecting nearer nodes. The function +// selects random pairs of nodes as a candidate arc then rejects the candidate +// if the nodes fail an affinity test. Also parallel arcs are rejected. +// As more affine or denser graphs are requested, rejections increase, +// increasing run time. 
The patience argument controls the number of arc +// rejections allowed before the function gives up and returns an error. +// Note that higher affinity will require more patience and that some +// combinations of nNodes and nArcs cannot be achieved with any amount of +// patience given that the returned graph must be simple. +// +// If Rand r is nil, the method creates a new source and generator for +// one-time use. +// +// Returned is a directed simple graph and associated positions indexed by +// node number. +// +// See also LabeledEuclidean. +func Euclidean(nNodes, nArcs int, affinity float64, patience int, r *rand.Rand) (g Directed, pos []struct{ X, Y float64 }, err error) { + a := make(AdjacencyList, nNodes) // graph + // generate random positions + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + pos = make([]struct{ X, Y float64 }, nNodes) + for i := range pos { + pos[i].X = r.Float64() + pos[i].Y = r.Float64() + } + // arcs + var tooFar, dup int +arc: + for i := 0; i < nArcs; { + if tooFar == nArcs*patience { + err = errors.New("affinity not found") + return + } + if dup == nArcs*patience { + err = errors.New("overcrowding") + return + } + n1 := NI(r.Intn(nNodes)) + var n2 NI + for { + n2 = NI(r.Intn(nNodes)) + if n2 != n1 { // no graph loops + break + } + } + c1 := &pos[n1] + c2 := &pos[n2] + dist := math.Hypot(c2.X-c1.X, c2.Y-c1.Y) + if dist*affinity > r.ExpFloat64() { // favor near nodes + tooFar++ + continue + } + for _, nb := range a[n1] { + if nb == n2 { // no parallel arcs + dup++ + continue arc + } + } + a[n1] = append(a[n1], n2) + i++ + } + g = Directed{a} + return +} + +// LabeledEuclidean generates a random simple graph on the Euclidean plane. +// +// Arc label values in the returned graph g are indexes into the return value +// wt. Wt is the Euclidean distance between the from and to nodes of the arc. +// +// Otherwise the function arguments and return values are the same as for +// function Euclidean. See Euclidean. +func LabeledEuclidean(nNodes, nArcs int, affinity float64, patience int, r *rand.Rand) (g LabeledDirected, pos []struct{ X, Y float64 }, wt []float64, err error) { + a := make(LabeledAdjacencyList, nNodes) // graph + wt = make([]float64, nArcs) // arc weights + // generate random positions + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + pos = make([]struct{ X, Y float64 }, nNodes) + for i := range pos { + pos[i].X = r.Float64() + pos[i].Y = r.Float64() + } + // arcs + var tooFar, dup int +arc: + for i := 0; i < nArcs; { + if tooFar == nArcs*patience { + err = errors.New("affinity not found") + return + } + if dup == nArcs*patience { + err = errors.New("overcrowding") + return + } + n1 := NI(r.Intn(nNodes)) + var n2 NI + for { + n2 = NI(r.Intn(nNodes)) + if n2 != n1 { // no graph loops + break + } + } + c1 := &pos[n1] + c2 := &pos[n2] + dist := math.Hypot(c2.X-c1.X, c2.Y-c1.Y) + if dist*affinity > r.ExpFloat64() { // favor near nodes + tooFar++ + continue + } + for _, nb := range a[n1] { + if nb.To == n2 { // no parallel arcs + dup++ + continue arc + } + } + wt[i] = dist + a[n1] = append(a[n1], Half{n2, LI(i)}) + i++ + } + g = LabeledDirected{a} + return +} + +// Geometric generates a random geometric graph (RGG) on the Euclidean plane. +// +// An RGG is an undirected simple graph. Nodes are associated with coordinates +// uniformly distributed on a unit square. Edges are added between all nodes +// falling within a specified distance or radius of each other. 
+// +// The resulting number of edges is somewhat random but asymptotically +// approaches m = Ï€r²n²/2. The method accumulates and returns the actual +// number of edges constructed. +// +// If Rand r is nil, the method creates a new source and generator for +// one-time use. +// +// See also LabeledGeometric. +func Geometric(nNodes int, radius float64, r *rand.Rand) (g Undirected, pos []struct{ X, Y float64 }, m int) { + // Expected degree is approximately nÏ€r². + a := make(AdjacencyList, nNodes) + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + pos = make([]struct{ X, Y float64 }, nNodes) + for i := range pos { + pos[i].X = r.Float64() + pos[i].Y = r.Float64() + } + for u, up := range pos { + for v := u + 1; v < len(pos); v++ { + vp := pos[v] + if math.Hypot(up.X-vp.X, up.Y-vp.Y) < radius { + a[u] = append(a[u], NI(v)) + a[v] = append(a[v], NI(u)) + m++ + } + } + } + g = Undirected{a} + return +} + +// LabeledGeometric generates a random geometric graph (RGG) on the Euclidean +// plane. +// +// Edge label values in the returned graph g are indexes into the return value +// wt. Wt is the Euclidean distance between nodes of the edge. The graph +// size m is len(wt). +// +// See Geometric for additional description. +func LabeledGeometric(nNodes int, radius float64, r *rand.Rand) (g LabeledUndirected, pos []struct{ X, Y float64 }, wt []float64) { + a := make(LabeledAdjacencyList, nNodes) + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + pos = make([]struct{ X, Y float64 }, nNodes) + for i := range pos { + pos[i].X = r.Float64() + pos[i].Y = r.Float64() + } + for u, up := range pos { + for v := u + 1; v < len(pos); v++ { + vp := pos[v] + if w := math.Hypot(up.X-vp.X, up.Y-vp.Y); w < radius { + a[u] = append(a[u], Half{NI(v), LI(len(wt))}) + a[v] = append(a[v], Half{NI(u), LI(len(wt))}) + wt = append(wt, w) + } + } + } + g = LabeledUndirected{a} + return +} + +// KroneckerDirected generates a Kronecker-like random directed graph. +// +// The returned graph g is simple and has no isolated nodes but is not +// necessarily fully connected. The number of of nodes will be <= 2^scale, +// and will be near 2^scale for typical values of arcFactor, >= 2. +// ArcFactor * 2^scale arcs are generated, although loops and duplicate arcs +// are rejected. +// +// If Rand r is nil, the method creates a new source and generator for +// one-time use. +// +// Return value ma is the number of arcs retained in the result graph. +func KroneckerDirected(scale uint, arcFactor float64, r *rand.Rand) (g Directed, ma int) { + a, m := kronecker(scale, arcFactor, true, r) + return Directed{a}, m +} + +// KroneckerUndirected generates a Kronecker-like random undirected graph. +// +// The returned graph g is simple and has no isolated nodes but is not +// necessarily fully connected. The number of of nodes will be <= 2^scale, +// and will be near 2^scale for typical values of edgeFactor, >= 2. +// EdgeFactor * 2^scale edges are generated, although loops and duplicate edges +// are rejected. +// +// If Rand r is nil, the method creates a new source and generator for +// one-time use. +// +// Return value m is the true number of edges--not arcs--retained in the result +// graph. +func KroneckerUndirected(scale uint, edgeFactor float64, r *rand.Rand) (g Undirected, m int) { + al, s := kronecker(scale, edgeFactor, false, r) + return Undirected{al}, s +} + +// Styled after the Graph500 example code. Not well tested currently. +// Graph500 example generates undirected only. 
No idea if the directed variant +// here is meaningful or not. +// +// note mma returns arc size ma for dir=true, but returns size m for dir=false +func kronecker(scale uint, edgeFactor float64, dir bool, r *rand.Rand) (g AdjacencyList, mma int) { + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + N := NI(1 << scale) // node extent + M := int(edgeFactor*float64(N) + .5) // number of arcs/edges to generate + a, b, c := 0.57, 0.19, 0.19 // initiator probabilities + ab := a + b + cNorm := c / (1 - ab) + aNorm := a / ab + ij := make([][2]NI, M) + var bm Bits + var nNodes int + for k := range ij { + var i, j NI + for b := NI(1); b < N; b <<= 1 { + if r.Float64() > ab { + i |= b + if r.Float64() > cNorm { + j |= b + } + } else if r.Float64() > aNorm { + j |= b + } + } + if bm.Bit(i) == 0 { + bm.SetBit(i, 1) + nNodes++ + } + if bm.Bit(j) == 0 { + bm.SetBit(j, 1) + nNodes++ + } + r := r.Intn(k + 1) // shuffle edges as they are generated + ij[k] = ij[r] + ij[r] = [2]NI{i, j} + } + p := r.Perm(nNodes) // mapping to shuffle IDs of non-isolated nodes + px := 0 + rn := make([]NI, N) + for i := range rn { + if bm.Bit(NI(i)) == 1 { + rn[i] = NI(p[px]) // fill lookup table + px++ + } + } + g = make(AdjacencyList, nNodes) +ij: + for _, e := range ij { + if e[0] == e[1] { + continue // skip loops + } + ri, rj := rn[e[0]], rn[e[1]] + for _, nb := range g[ri] { + if nb == rj { + continue ij // skip parallel edges + } + } + g[ri] = append(g[ri], rj) + mma++ + if !dir { + g[rj] = append(g[rj], ri) + } + } + return +} diff --git a/vendor/github.com/soniakeys/graph/readme.md b/vendor/github.com/soniakeys/graph/readme.md new file mode 100644 index 00000000..539670ff --- /dev/null +++ b/vendor/github.com/soniakeys/graph/readme.md @@ -0,0 +1,38 @@ +#Graph + +A graph library with goals of speed and simplicity, Graph implements +graph algorithms on graphs of zero-based integer node IDs. + +[![GoDoc](https://godoc.org/github.com/soniakeys/graph?status.svg)](https://godoc.org/github.com/soniakeys/graph) [![Go Walker](http://gowalker.org/api/v1/badge)](https://gowalker.org/github.com/soniakeys/graph) [![GoSearch](http://go-search.org/badge?id=github.com%2Fsoniakeys%2Fgraph)](http://go-search.org/view?id=github.com%2Fsoniakeys%2Fgraph)[![Build Status](https://travis-ci.org/soniakeys/graph.svg?branch=master)](https://travis-ci.org/soniakeys/graph) + +Status, 4 Apr 2016: The repo has benefitted recently from being included +in another package. In response to users of that package, this repo now +builds for 32 bit Windows and ARM, and for Go versions back to 1.2.1. +Thank you all who have filed issues. + +###Non-source files of interest + +The directory [tutorials](tutorials) is a work in progress - there are only +a couple of tutorials there yet - but the concept is to provide some topical +walk-throughs to supplement godoc. The source-based godoc documentation +remains the primary documentation. + +* [Dijkstra's algorithm](tutorials/dijkstra.md) +* [AdjacencyList types](tutorials/adjacencylist.md) + +The directory [bench](bench) is another work in progress. The concept is +to present some plots showing benchmark performance approaching some +theoretical asymptote. + +[hacking.md](hacking.md) has some information about how the library is +developed, built, and tested. It might be of interest if for example you +plan to fork or contribute to the the repository. 
+ +###Test coverage +8 Apr 2016 +``` +graph 95.3% +graph/df 20.7% +graph/dot 77.5% +graph/treevis 79.4% +``` diff --git a/vendor/github.com/soniakeys/graph/sssp.go b/vendor/github.com/soniakeys/graph/sssp.go new file mode 100644 index 00000000..32cc192e --- /dev/null +++ b/vendor/github.com/soniakeys/graph/sssp.go @@ -0,0 +1,881 @@ +// Copyright 2013 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +import ( + "container/heap" + "fmt" + "math" +) + +// rNode holds data for a "reached" node +type rNode struct { + nx NI + state int8 // state constants defined below + f float64 // "g+h", path dist + heuristic estimate + fx int // heap.Fix index +} + +// for rNode.state +const ( + unreached = 0 + reached = 1 + open = 1 + closed = 2 +) + +type openHeap []*rNode + +// A Heuristic is defined on a specific end node. The function +// returns an estimate of the path distance from node argument +// "from" to the end node. Two subclasses of heuristics are "admissible" +// and "monotonic." +// +// Admissible means the value returned is guaranteed to be less than or +// equal to the actual shortest path distance from the node to end. +// +// An admissible estimate may further be monotonic. +// Monotonic means that for any neighboring nodes A and B with half arc aB +// leading from A to B, and for heuristic h defined on some end node, then +// h(A) <= aB.ArcWeight + h(B). +// +// See AStarA for additional notes on implementing heuristic functions for +// AStar search methods. +type Heuristic func(from NI) float64 + +// Admissible returns true if heuristic h is admissible on graph g relative to +// the given end node. +// +// If h is inadmissible, the string result describes a counter example. +func (h Heuristic) Admissible(g LabeledAdjacencyList, w WeightFunc, end NI) (bool, string) { + // invert graph + inv := make(LabeledAdjacencyList, len(g)) + for from, nbs := range g { + for _, nb := range nbs { + inv[nb.To] = append(inv[nb.To], + Half{To: NI(from), Label: nb.Label}) + } + } + // run dijkstra + // Dijkstra.AllPaths takes a start node but after inverting the graph + // argument end now represents the start node of the inverted graph. + f, dist, _ := inv.Dijkstra(end, -1, w) + // compare h to found shortest paths + for n := range inv { + if f.Paths[n].Len == 0 { + continue // no path, any heuristic estimate is fine. + } + if !(h(NI(n)) <= dist[n]) { + return false, fmt.Sprintf("h(%d) = %g, "+ + "required to be <= found shortest path (%g)", + n, h(NI(n)), dist[n]) + } + } + return true, "" +} + +// Monotonic returns true if heuristic h is monotonic on weighted graph g. +// +// If h is non-monotonic, the string result describes a counter example. +func (h Heuristic) Monotonic(g LabeledAdjacencyList, w WeightFunc) (bool, string) { + // precompute + hv := make([]float64, len(g)) + for n := range g { + hv[n] = h(NI(n)) + } + // iterate over all edges + for from, nbs := range g { + for _, nb := range nbs { + arcWeight := w(nb.Label) + if !(hv[from] <= arcWeight+hv[nb.To]) { + return false, fmt.Sprintf("h(%d) = %g, "+ + "required to be <= arc weight + h(%d) (= %g + %g = %g)", + from, hv[from], + nb.To, arcWeight, hv[nb.To], arcWeight+hv[nb.To]) + } + } + } + return true, "" +} + +// AStarA finds a path between two nodes. +// +// AStarA implements both algorithm A and algorithm A*. The difference in the +// two algorithms is strictly in the heuristic estimate returned by argument h. 
+// If h is an "admissible" heuristic estimate, then the algorithm is termed A*, +// otherwise it is algorithm A. +// +// Like Dijkstra's algorithm, AStarA with an admissible heuristic finds the +// shortest path between start and end. AStarA generally runs faster than +// Dijkstra though, by using the heuristic distance estimate. +// +// AStarA with an inadmissible heuristic becomes algorithm A. Algorithm A +// will find a path, but it is not guaranteed to be the shortest path. +// The heuristic still guides the search however, so a nearly admissible +// heuristic is likely to find a very good path, if not the best. Quality +// of the path returned degrades gracefully with the quality of the heuristic. +// +// The heuristic function h should ideally be fairly inexpensive. AStarA +// may call it more than once for the same node, especially as graph density +// increases. In some cases it may be worth the effort to memoize or +// precompute values. +// +// Argument g is the graph to be searched, with arc weights returned by w. +// As usual for AStar, arc weights must be non-negative. +// Graphs may be directed or undirected. +// +// If AStarA finds a path it returns a FromList encoding the path, the arc +// labels for path nodes, the total path distance, and ok = true. +// Otherwise it returns ok = false. +func (g LabeledAdjacencyList) AStarA(w WeightFunc, start, end NI, h Heuristic) (f FromList, labels []LI, dist float64, ok bool) { + // NOTE: AStarM is largely duplicate code. + + f = NewFromList(len(g)) + labels = make([]LI, len(g)) + d := make([]float64, len(g)) + r := make([]rNode, len(g)) + for i := range r { + r[i].nx = NI(i) + } + // start node is reached initially + cr := &r[start] + cr.state = reached + cr.f = h(start) // total path estimate is estimate from start + rp := f.Paths + rp[start] = PathEnd{Len: 1, From: -1} // path length at start is 1 node + // oh is a heap of nodes "open" for exploration. nodes go on the heap + // when they get an initial or new "g" path distance, and therefore a + // new "f" which serves as priority for exploration. + oh := openHeap{cr} + for len(oh) > 0 { + bestPath := heap.Pop(&oh).(*rNode) + bestNode := bestPath.nx + if bestNode == end { + return f, labels, d[end], true + } + bp := &rp[bestNode] + nextLen := bp.Len + 1 + for _, nb := range g[bestNode] { + alt := &r[nb.To] + ap := &rp[alt.nx] + // "g" path distance from start + g := d[bestNode] + w(nb.Label) + if alt.state == reached { + if g > d[nb.To] { + // candidate path to nb is longer than some alternate path + continue + } + if g == d[nb.To] && nextLen >= ap.Len { + // candidate path has identical length of some alternate + // path but it takes no fewer hops. + continue + } + // cool, we found a better way to get to this node. + // record new path data for this node and + // update alt with new data and make sure it's on the heap. + *ap = PathEnd{From: bestNode, Len: nextLen} + labels[nb.To] = nb.Label + d[nb.To] = g + alt.f = g + h(nb.To) + if alt.fx < 0 { + heap.Push(&oh, alt) + } else { + heap.Fix(&oh, alt.fx) + } + } else { + // bestNode being reached for the first time. + *ap = PathEnd{From: bestNode, Len: nextLen} + labels[nb.To] = nb.Label + d[nb.To] = g + alt.f = g + h(nb.To) + alt.state = reached + heap.Push(&oh, alt) // and it's now open for exploration + } + } + } + return // no path +} + +// AStarAPath finds a shortest path using the AStarA algorithm. +// +// This is a convenience method with a simpler result than the AStarA method. +// See documentation on the AStarA method. 
+// +// If a path is found, the non-nil node path is returned with the total path +// distance. Otherwise the returned path will be nil. +func (g LabeledAdjacencyList) AStarAPath(start, end NI, h Heuristic, w WeightFunc) ([]NI, float64) { + f, _, d, _ := g.AStarA(w, start, end, h) + return f.PathTo(end, nil), d +} + +// AStarM is AStarA optimized for monotonic heuristic estimates. +// +// Note that this function requires a monotonic heuristic. Results will +// not be meaningful if argument h is non-monotonic. +// +// See AStarA for general usage. See Heuristic for notes on monotonicity. +func (g LabeledAdjacencyList) AStarM(w WeightFunc, start, end NI, h Heuristic) (f FromList, labels []LI, dist float64, ok bool) { + // NOTE: AStarM is largely code duplicated from AStarA. + // Differences are noted in comments in this method. + + f = NewFromList(len(g)) + labels = make([]LI, len(g)) + d := make([]float64, len(g)) + r := make([]rNode, len(g)) + for i := range r { + r[i].nx = NI(i) + } + cr := &r[start] + + // difference from AStarA: + // instead of a bit to mark a reached node, there are two states, + // open and closed. open marks nodes "open" for exploration. + // nodes are marked open as they are reached, then marked + // closed as they are found to be on the best path. + cr.state = open + + cr.f = h(start) + rp := f.Paths + rp[start] = PathEnd{Len: 1, From: -1} + oh := openHeap{cr} + for len(oh) > 0 { + bestPath := heap.Pop(&oh).(*rNode) + bestNode := bestPath.nx + if bestNode == end { + return f, labels, d[end], true + } + + // difference from AStarA: + // move nodes to closed list as they are found to be best so far. + bestPath.state = closed + + bp := &rp[bestNode] + nextLen := bp.Len + 1 + for _, nb := range g[bestNode] { + alt := &r[nb.To] + + // difference from AStarA: + // Monotonicity means that f cannot be improved. + if alt.state == closed { + continue + } + + ap := &rp[alt.nx] + g := d[bestNode] + w(nb.Label) + + // difference from AStarA: + // test for open state, not just reached + if alt.state == open { + + if g > d[nb.To] { + continue + } + if g == d[nb.To] && nextLen >= ap.Len { + continue + } + *ap = PathEnd{From: bestNode, Len: nextLen} + labels[nb.To] = nb.Label + d[nb.To] = g + alt.f = g + h(nb.To) + + // difference from AStarA: + // we know alt was on the heap because we found it marked open + heap.Fix(&oh, alt.fx) + } else { + *ap = PathEnd{From: bestNode, Len: nextLen} + labels[nb.To] = nb.Label + d[nb.To] = g + alt.f = g + h(nb.To) + + // difference from AStarA: + // nodes are opened when first reached + alt.state = open + heap.Push(&oh, alt) + } + } + } + return +} + +// AStarMPath finds a shortest path using the AStarM algorithm. +// +// This is a convenience method with a simpler result than the AStarM method. +// See documentation on the AStarM and AStarA methods. +// +// If a path is found, the non-nil node path is returned with the total path +// distance. Otherwise the returned path will be nil. 
+func (g LabeledAdjacencyList) AStarMPath(start, end NI, h Heuristic, w WeightFunc) ([]NI, float64) { + f, _, d, _ := g.AStarM(w, start, end, h) + return f.PathTo(end, nil), d +} + +// implement container/heap +func (h openHeap) Len() int { return len(h) } +func (h openHeap) Less(i, j int) bool { return h[i].f < h[j].f } +func (h openHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] + h[i].fx = i + h[j].fx = j +} +func (p *openHeap) Push(x interface{}) { + h := *p + fx := len(h) + h = append(h, x.(*rNode)) + h[fx].fx = fx + *p = h +} + +func (p *openHeap) Pop() interface{} { + h := *p + last := len(h) - 1 + *p = h[:last] + h[last].fx = -1 + return h[last] +} + +// BellmanFord finds shortest paths from a start node in a weighted directed +// graph using the Bellman-Ford-Moore algorithm. +// +// WeightFunc w must translate arc labels to arc weights. +// Negative arc weights are allowed but not negative cycles. +// Loops and parallel arcs are allowed. +// +// If the algorithm completes without encountering a negative cycle the method +// returns shortest paths encoded in a FromList, path distances indexed by +// node, and return value end = -1. +// +// If it encounters a negative cycle reachable from start it returns end >= 0. +// In this case the cycle can be obtained by calling f.BellmanFordCycle(end). +// +// Negative cycles are only detected when reachable from start. A negative +// cycle not reachable from start will not prevent the algorithm from finding +// shortest paths from start. +// +// See also NegativeCycle to find a cycle anywhere in the graph, and see +// HasNegativeCycle for lighter-weight negative cycle detection, +func (g LabeledDirected) BellmanFord(w WeightFunc, start NI) (f FromList, dist []float64, end NI) { + a := g.LabeledAdjacencyList + f = NewFromList(len(a)) + dist = make([]float64, len(a)) + inf := math.Inf(1) + for i := range dist { + dist[i] = inf + } + rp := f.Paths + rp[start] = PathEnd{Len: 1, From: -1} + dist[start] = 0 + for _ = range a[1:] { + imp := false + for from, nbs := range a { + fp := &rp[from] + d1 := dist[from] + for _, nb := range nbs { + d2 := d1 + w(nb.Label) + to := &rp[nb.To] + // TODO improve to break ties + if fp.Len > 0 && d2 < dist[nb.To] { + *to = PathEnd{From: NI(from), Len: fp.Len + 1} + dist[nb.To] = d2 + imp = true + } + } + } + if !imp { + break + } + } + for from, nbs := range a { + d1 := dist[from] + for _, nb := range nbs { + if d1+w(nb.Label) < dist[nb.To] { + // return nb as end of a path with negative cycle at root + return f, dist, NI(from) + } + } + } + return f, dist, -1 +} + +// BellmanFordCycle decodes a negative cycle detected by BellmanFord. +// +// Receiver f and argument end must be results returned from BellmanFord. +func (f FromList) BellmanFordCycle(end NI) (c []NI) { + p := f.Paths + var b Bits + for b.Bit(end) == 0 { + b.SetBit(end, 1) + end = p[end].From + } + for b.Bit(end) == 1 { + c = append(c, end) + b.SetBit(end, 0) + end = p[end].From + } + for i, j := 0, len(c)-1; i < j; i, j = i+1, j-1 { + c[i], c[j] = c[j], c[i] + } + return +} + +// HasNegativeCycle returns true if the graph contains any negative cycle. +// +// HasNegativeCycle uses a Bellman-Ford-like algorithm, but finds negative +// cycles anywhere in the graph. Also path information is not computed, +// reducing memory use somewhat compared to BellmanFord. +// +// See also NegativeCycle to obtain the cycle, and see BellmanFord for +// single source shortest path searches. 
+func (g LabeledDirected) HasNegativeCycle(w WeightFunc) bool { + a := g.LabeledAdjacencyList + dist := make([]float64, len(a)) + for _ = range a[1:] { + imp := false + for from, nbs := range a { + d1 := dist[from] + for _, nb := range nbs { + d2 := d1 + w(nb.Label) + if d2 < dist[nb.To] { + dist[nb.To] = d2 + imp = true + } + } + } + if !imp { + break + } + } + for from, nbs := range a { + d1 := dist[from] + for _, nb := range nbs { + if d1+w(nb.Label) < dist[nb.To] { + return true // negative cycle + } + } + } + return false +} + +// NegativeCycle finds a negative cycle if one exists. +// +// NegativeCycle uses a Bellman-Ford-like algorithm, but finds negative +// cycles anywhere in the graph. If a negative cycle exists, one will be +// returned. The result is nil if no negative cycle exists. +// +// See also HasNegativeCycle for lighter-weight cycle detection, and see +// BellmanFord for single source shortest paths. +func (g LabeledDirected) NegativeCycle(w WeightFunc) (c []NI) { + a := g.LabeledAdjacencyList + f := NewFromList(len(a)) + p := f.Paths + for n := range p { + p[n] = PathEnd{From: -1, Len: 1} + } + dist := make([]float64, len(a)) + for _ = range a { + imp := false + for from, nbs := range a { + fp := &p[from] + d1 := dist[from] + for _, nb := range nbs { + d2 := d1 + w(nb.Label) + to := &p[nb.To] + if fp.Len > 0 && d2 < dist[nb.To] { + *to = PathEnd{From: NI(from), Len: fp.Len + 1} + dist[nb.To] = d2 + imp = true + } + } + } + if !imp { + return nil + } + } + var vis Bits +a: + for n := range a { + end := NI(n) + var b Bits + for b.Bit(end) == 0 { + if vis.Bit(end) == 1 { + continue a + } + vis.SetBit(end, 1) + b.SetBit(end, 1) + end = p[end].From + if end < 0 { + continue a + } + } + for b.Bit(end) == 1 { + c = append(c, end) + b.SetBit(end, 0) + end = p[end].From + } + for i, j := 0, len(c)-1; i < j; i, j = i+1, j-1 { + c[i], c[j] = c[j], c[i] + } + return c + } + return nil // no negative cycle +} + +// A NodeVisitor is an argument to some graph traversal methods. +// +// Graph traversal methods call the visitor function for each node visited. +// Argument n is the node being visited. +type NodeVisitor func(n NI) + +// An OkNodeVisitor function is an argument to some graph traversal methods. +// +// Graph traversal methods call the visitor function for each node visited. +// The argument n is the node being visited. If the visitor function +// returns true, the traversal will continue. If the visitor function +// returns false, the traversal will terminate immediately. +type OkNodeVisitor func(n NI) (ok bool) + +// BreadthFirst2 traverses a graph breadth first using a direction +// optimizing algorithm. +// +// The code is experimental and currently seems no faster than the +// conventional breadth first code. +// +// Use AdjacencyList.BreadthFirst instead. 
+func BreadthFirst2(g, tr AdjacencyList, ma int, start NI, f *FromList, v OkNodeVisitor) int { + if tr == nil { + var d Directed + d, ma = Directed{g}.Transpose() + tr = d.AdjacencyList + } + switch { + case f == nil: + e := NewFromList(len(g)) + f = &e + case f.Paths == nil: + *f = NewFromList(len(g)) + } + if ma <= 0 { + ma = g.ArcSize() + } + rp := f.Paths + level := 1 + rp[start] = PathEnd{Len: level, From: -1} + if !v(start) { + f.MaxLen = level + return -1 + } + nReached := 1 // accumulated for a return value + // the frontier consists of nodes all at the same level + frontier := []NI{start} + mf := len(g[start]) // number of arcs leading out from frontier + ctb := ma / 10 // threshold change from top-down to bottom-up + k14 := 14 * ma / len(g) // 14 * mean degree + cbt := len(g) / k14 // threshold change from bottom-up to top-down + // var fBits, nextb big.Int + fBits := make([]bool, len(g)) + nextb := make([]bool, len(g)) + zBits := make([]bool, len(g)) + for { + // top down step + level++ + var next []NI + for _, n := range frontier { + for _, nb := range g[n] { + if rp[nb].Len == 0 { + rp[nb] = PathEnd{From: n, Len: level} + if !v(nb) { + f.MaxLen = level + return -1 + } + next = append(next, nb) + nReached++ + } + } + } + if len(next) == 0 { + break + } + frontier = next + if mf > ctb { + // switch to bottom up! + } else { + // stick with top down + continue + } + // convert frontier representation + nf := 0 // number of vertices on the frontier + for _, n := range frontier { + // fBits.SetBit(&fBits, n, 1) + fBits[n] = true + nf++ + } + bottomUpLoop: + level++ + nNext := 0 + for n := range tr { + if rp[n].Len == 0 { + for _, nb := range tr[n] { + // if fBits.Bit(nb) == 1 { + if fBits[nb] { + rp[n] = PathEnd{From: nb, Len: level} + if !v(nb) { + f.MaxLen = level + return -1 + } + // nextb.SetBit(&nextb, n, 1) + nextb[n] = true + nReached++ + nNext++ + break + } + } + } + } + if nNext == 0 { + break + } + fBits, nextb = nextb, fBits + // nextb.SetInt64(0) + copy(nextb, zBits) + nf = nNext + if nf < cbt { + // switch back to top down! + } else { + // stick with bottom up + goto bottomUpLoop + } + // convert frontier representation + mf = 0 + frontier = frontier[:0] + for n := range g { + // if fBits.Bit(n) == 1 { + if fBits[n] { + frontier = append(frontier, NI(n)) + mf += len(g[n]) + fBits[n] = false + } + } + // fBits.SetInt64(0) + } + f.MaxLen = level - 1 + return nReached +} + +// DAGMinDistPath finds a single shortest path. +// +// Shortest means minimum sum of arc weights. +// +// Returned is the path and distance as returned by FromList.PathTo. +// +// This is a convenience method. See DAGOptimalPaths for more options. +func (g LabeledDirected) DAGMinDistPath(start, end NI, w WeightFunc) ([]NI, float64, error) { + return g.dagPath(start, end, w, false) +} + +// DAGMaxDistPath finds a single longest path. +// +// Longest means maximum sum of arc weights. +// +// Returned is the path and distance as returned by FromList.PathTo. +// +// This is a convenience method. See DAGOptimalPaths for more options. 
+func (g LabeledDirected) DAGMaxDistPath(start, end NI, w WeightFunc) ([]NI, float64, error) { + return g.dagPath(start, end, w, true) +} + +func (g LabeledDirected) dagPath(start, end NI, w WeightFunc, longest bool) ([]NI, float64, error) { + o, _ := g.Topological() + if o == nil { + return nil, 0, fmt.Errorf("not a DAG") + } + f, dist, _ := g.DAGOptimalPaths(start, end, o, w, longest) + if f.Paths[end].Len == 0 { + return nil, 0, fmt.Errorf("no path from %d to %d", start, end) + } + return f.PathTo(end, nil), dist[end], nil +} + +// DAGOptimalPaths finds either longest or shortest distance paths in a +// directed acyclic graph. +// +// Path distance is the sum of arc weights on the path. +// Negative arc weights are allowed. +// Where multiple paths exist with the same distance, the path length +// (number of nodes) is used as a tie breaker. +// +// Receiver g must be a directed acyclic graph. Argument o must be either nil +// or a topological ordering of g. If nil, a topologcal ordering is +// computed internally. If longest is true, an optimal path is a longest +// distance path. Otherwise it is a shortest distance path. +// +// Argument start is the start node for paths, end is the end node. If end +// is a valid node number, the method returns as soon as the optimal path +// to end is found. If end is -1, all optimal paths from start are found. +// +// Paths and path distances are encoded in the returned FromList and dist +// slice. The number of nodes reached is returned as nReached. +func (g LabeledDirected) DAGOptimalPaths(start, end NI, ordering []NI, w WeightFunc, longest bool) (f FromList, dist []float64, nReached int) { + a := g.LabeledAdjacencyList + f = NewFromList(len(a)) + dist = make([]float64, len(a)) + if ordering == nil { + ordering, _ = g.Topological() + } + // search ordering for start + o := 0 + for ordering[o] != start { + o++ + } + var fBetter func(cand, ext float64) bool + var iBetter func(cand, ext int) bool + if longest { + fBetter = func(cand, ext float64) bool { return cand > ext } + iBetter = func(cand, ext int) bool { return cand > ext } + } else { + fBetter = func(cand, ext float64) bool { return cand < ext } + iBetter = func(cand, ext int) bool { return cand < ext } + } + p := f.Paths + p[start] = PathEnd{From: -1, Len: 1} + f.MaxLen = 1 + leaves := &f.Leaves + leaves.SetBit(start, 1) + nReached = 1 + for n := start; n != end; n = ordering[o] { + if p[n].Len > 0 && len(a[n]) > 0 { + nDist := dist[n] + candLen := p[n].Len + 1 // len for any candidate arc followed from n + for _, to := range a[n] { + leaves.SetBit(to.To, 1) + candDist := nDist + w(to.Label) + switch { + case p[to.To].Len == 0: // first path to node to.To + nReached++ + case fBetter(candDist, dist[to.To]): // better distance + case candDist == dist[to.To] && iBetter(candLen, p[to.To].Len): // same distance but better path length + default: + continue + } + dist[to.To] = candDist + p[to.To] = PathEnd{From: n, Len: candLen} + if candLen > f.MaxLen { + f.MaxLen = candLen + } + } + leaves.SetBit(n, 0) + } + o++ + if o == len(ordering) { + break + } + } + return +} + +// Dijkstra finds shortest paths by Dijkstra's algorithm. +// +// Shortest means shortest distance where distance is the +// sum of arc weights. Where multiple paths exist with the same distance, +// a path with the minimum number of nodes is returned. +// +// As usual for Dijkstra's algorithm, arc weights must be non-negative. +// Graphs may be directed or undirected. Loops and parallel arcs are +// allowed. 
+func (g LabeledAdjacencyList) Dijkstra(start, end NI, w WeightFunc) (f FromList, dist []float64, reached int) { + r := make([]tentResult, len(g)) + for i := range r { + r[i].nx = NI(i) + } + f = NewFromList(len(g)) + dist = make([]float64, len(g)) + current := start + rp := f.Paths + rp[current] = PathEnd{Len: 1, From: -1} // path length at start is 1 node + cr := &r[current] + cr.dist = 0 // distance at start is 0. + cr.done = true // mark start done. it skips the heap. + nDone := 1 // accumulated for a return value + var t tent + for current != end { + nextLen := rp[current].Len + 1 + for _, nb := range g[current] { + // d.arcVis++ + hr := &r[nb.To] + if hr.done { + continue // skip nodes already done + } + dist := cr.dist + w(nb.Label) + vl := rp[nb.To].Len + visited := vl > 0 + if visited { + if dist > hr.dist { + continue // distance is worse + } + // tie breaker is a nice touch and doesn't seem to + // impact performance much. + if dist == hr.dist && nextLen >= vl { + continue // distance same, but number of nodes is no better + } + } + // the path through current to this node is shortest so far. + // record new path data for this node and update tentative set. + hr.dist = dist + rp[nb.To].Len = nextLen + rp[nb.To].From = current + if visited { + heap.Fix(&t, hr.fx) + } else { + heap.Push(&t, hr) + } + } + //d.ndVis++ + if len(t) == 0 { + return f, dist, nDone // no more reachable nodes. AllPaths normal return + } + // new current is node with smallest tentative distance + cr = heap.Pop(&t).(*tentResult) + cr.done = true + nDone++ + current = cr.nx + dist[current] = cr.dist // store final distance + } + // normal return for single shortest path search + return f, dist, -1 +} + +// DijkstraPath finds a single shortest path. +// +// Returned is the path and distance as returned by FromList.PathTo. +func (g LabeledAdjacencyList) DijkstraPath(start, end NI, w WeightFunc) ([]NI, float64) { + f, dist, _ := g.Dijkstra(start, end, w) + return f.PathTo(end, nil), dist[end] +} + +// tent implements container/heap +func (t tent) Len() int { return len(t) } +func (t tent) Less(i, j int) bool { return t[i].dist < t[j].dist } +func (t tent) Swap(i, j int) { + t[i], t[j] = t[j], t[i] + t[i].fx = i + t[j].fx = j +} +func (s *tent) Push(x interface{}) { + nd := x.(*tentResult) + nd.fx = len(*s) + *s = append(*s, nd) +} +func (s *tent) Pop() interface{} { + t := *s + last := len(t) - 1 + *s = t[:last] + return t[last] +} + +type tentResult struct { + dist float64 // tentative distance, sum of arc weights + nx NI // slice index, "node id" + fx int // heap.Fix index + done bool +} + +type tent []*tentResult diff --git a/vendor/github.com/soniakeys/graph/travis.sh b/vendor/github.com/soniakeys/graph/travis.sh new file mode 100644 index 00000000..5a8030ac --- /dev/null +++ b/vendor/github.com/soniakeys/graph/travis.sh @@ -0,0 +1,11 @@ +#!/bin/bash +set -ex +go test ./... +if [ "$TRAVIS_GO_VERSION" = "1.6" ]; then + GOARCH=386 go test ./... + go tool vet -example . + go get github.com/client9/misspell/cmd/misspell + go get github.com/soniakeys/vetc + misspell -error * */* */*/* + vetc +fi diff --git a/vendor/github.com/soniakeys/graph/undir.go b/vendor/github.com/soniakeys/graph/undir.go new file mode 100644 index 00000000..75a7f248 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/undir.go @@ -0,0 +1,321 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// undir.go has methods specific to undirected graphs, Undirected and +// LabeledUndirected. 
+ +import "errors" + +// AddEdge adds an edge to a graph. +// +// It can be useful for constructing undirected graphs. +// +// When n1 and n2 are distinct, it adds the arc n1->n2 and the reciprocal +// n2->n1. When n1 and n2 are the same, it adds a single arc loop. +// +// The pointer receiver allows the method to expand the graph as needed +// to include the values n1 and n2. If n1 or n2 happen to be greater than +// len(*p) the method does not panic, but simply expands the graph. +func (p *Undirected) AddEdge(n1, n2 NI) { + // Similar code in LabeledAdjacencyList.AddEdge. + + // determine max of the two end points + max := n1 + if n2 > max { + max = n2 + } + // expand graph if needed, to include both + g := p.AdjacencyList + if int(max) >= len(g) { + p.AdjacencyList = make(AdjacencyList, max+1) + copy(p.AdjacencyList, g) + g = p.AdjacencyList + } + // create one half-arc, + g[n1] = append(g[n1], n2) + // and except for loops, create the reciprocal + if n1 != n2 { + g[n2] = append(g[n2], n1) + } +} + +// EulerianCycleD for undirected graphs is a bit of an experiment. +// +// It is about the same as the directed version, but modified for an undirected +// multigraph. +// +// Parameter m in this case must be the size of the undirected graph -- the +// number of edges. Use Undirected.Size if the size is unknown. +// +// It works, but contains an extra loop that I think spoils the time +// complexity. Probably still pretty fast in practice, but a different +// graph representation might be better. +func (g Undirected) EulerianCycleD(m int) ([]NI, error) { + if len(g.AdjacencyList) == 0 { + return nil, nil + } + e := newEulerian(g.AdjacencyList, m) + for e.s >= 0 { + v := e.top() + e.pushUndir() // call modified method + if e.top() != v { + return nil, errors.New("not balanced") + } + e.keep() + } + if !e.uv.Zero() { + return nil, errors.New("not strongly connected") + } + return e.p, nil +} + +// TarjanBiconnectedComponents decomposes a graph into maximal biconnected +// components, components for which if any node were removed the component +// would remain connected. +// +// The receiver g must be a simple graph. The method calls the emit argument +// for each component identified, as long as emit returns true. If emit +// returns false, TarjanBiconnectedComponents returns immediately. +// +// See also the eqivalent labeled TarjanBiconnectedComponents. +func (g Undirected) TarjanBiconnectedComponents(emit func([]Edge) bool) { + // Implemented closely to pseudocode in "Depth-first search and linear + // graph algorithms", Robert Tarjan, SIAM J. Comput. Vol. 1, No. 2, + // June 1972. + // + // Note Tarjan's "adjacency structure" is graph.AdjacencyList, + // His "adjacency list" is an element of a graph.AdjacencyList, also + // termed a "to-list", "neighbor list", or "child list." 
+ number := make([]int, len(g.AdjacencyList)) + lowpt := make([]int, len(g.AdjacencyList)) + var stack []Edge + var i int + var biconnect func(NI, NI) bool + biconnect = func(v, u NI) bool { + i++ + number[v] = i + lowpt[v] = i + for _, w := range g.AdjacencyList[v] { + if number[w] == 0 { + stack = append(stack, Edge{v, w}) + if !biconnect(w, v) { + return false + } + if lowpt[w] < lowpt[v] { + lowpt[v] = lowpt[w] + } + if lowpt[w] >= number[v] { + var bcc []Edge + top := len(stack) - 1 + for number[stack[top].N1] >= number[w] { + bcc = append(bcc, stack[top]) + stack = stack[:top] + top-- + } + bcc = append(bcc, stack[top]) + stack = stack[:top] + top-- + if !emit(bcc) { + return false + } + } + } else if number[w] < number[v] && w != u { + stack = append(stack, Edge{v, w}) + if number[w] < lowpt[v] { + lowpt[v] = number[w] + } + } + } + return true + } + for w := range g.AdjacencyList { + if number[w] == 0 && !biconnect(NI(w), 0) { + return + } + } +} + +/* half-baked. Read the 72 paper. Maybe revisit at some point. +type BiconnectedComponents struct { + Graph AdjacencyList + Start int + Cuts big.Int // bitmap of node cuts + From []int // from-tree + Leaves []int // leaves of from-tree +} + +func NewBiconnectedComponents(g Undirected) *BiconnectedComponents { + return &BiconnectedComponents{ + Graph: g, + From: make([]int, len(g)), + } +} + +func (b *BiconnectedComponents) Find(start int) { + g := b.Graph + depth := make([]int, len(g)) + low := make([]int, len(g)) + // reset from any previous run + b.Cuts.SetInt64(0) + bf := b.From + for n := range bf { + bf[n] = -1 + } + b.Leaves = b.Leaves[:0] + d := 1 // depth. d > 0 means visited + depth[start] = d + low[start] = d + d++ + var df func(int, int) + df = func(from, n int) { + bf[n] = from + depth[n] = d + dn := d + l := d + d++ + cut := false + leaf := true + for _, nb := range g[n] { + if depth[nb] == 0 { + leaf = false + df(n, nb) + if low[nb] < l { + l = low[nb] + } + if low[nb] >= dn { + cut = true + } + } else if nb != from && depth[nb] < l { + l = depth[nb] + } + } + low[n] = l + if cut { + b.Cuts.SetBit(&b.Cuts, n, 1) + } + if leaf { + b.Leaves = append(b.Leaves, n) + } + d-- + } + nbs := g[start] + if len(nbs) == 0 { + return + } + df(start, nbs[0]) + var rc uint + for _, nb := range nbs[1:] { + if depth[nb] == 0 { + rc = 1 + df(start, nb) + } + } + b.Cuts.SetBit(&b.Cuts, start, rc) + return +} +*/ + +// AddEdge adds an edge to a labeled graph. +// +// It can be useful for constructing undirected graphs. +// +// When n1 and n2 are distinct, it adds the arc n1->n2 and the reciprocal +// n2->n1. When n1 and n2 are the same, it adds a single arc loop. +// +// If the edge already exists in *p, a parallel edge is added. +// +// The pointer receiver allows the method to expand the graph as needed +// to include the values n1 and n2. If n1 or n2 happen to be greater than +// len(*p) the method does not panic, but simply expands the graph. +func (p *LabeledUndirected) AddEdge(e Edge, l LI) { + // Similar code in AdjacencyList.AddEdge. 
+ + // determine max of the two end points + max := e.N1 + if e.N2 > max { + max = e.N2 + } + // expand graph if needed, to include both + g := p.LabeledAdjacencyList + if max >= NI(len(g)) { + p.LabeledAdjacencyList = make(LabeledAdjacencyList, max+1) + copy(p.LabeledAdjacencyList, g) + g = p.LabeledAdjacencyList + } + // create one half-arc, + g[e.N1] = append(g[e.N1], Half{To: e.N2, Label: l}) + // and except for loops, create the reciprocal + if e.N1 != e.N2 { + g[e.N2] = append(g[e.N2], Half{To: e.N1, Label: l}) + } +} + +// TarjanBiconnectedComponents decomposes a graph into maximal biconnected +// components, components for which if any node were removed the component +// would remain connected. +// +// The receiver g must be a simple graph. The method calls the emit argument +// for each component identified, as long as emit returns true. If emit +// returns false, TarjanBiconnectedComponents returns immediately. +// +// See also the eqivalent unlabeled TarjanBiconnectedComponents. +func (g LabeledUndirected) TarjanBiconnectedComponents(emit func([]LabeledEdge) bool) { + // Implemented closely to pseudocode in "Depth-first search and linear + // graph algorithms", Robert Tarjan, SIAM J. Comput. Vol. 1, No. 2, + // June 1972. + // + // Note Tarjan's "adjacency structure" is graph.AdjacencyList, + // His "adjacency list" is an element of a graph.AdjacencyList, also + // termed a "to-list", "neighbor list", or "child list." + // + // Nearly identical code in undir.go. + number := make([]int, len(g.LabeledAdjacencyList)) + lowpt := make([]int, len(g.LabeledAdjacencyList)) + var stack []LabeledEdge + var i int + var biconnect func(NI, NI) bool + biconnect = func(v, u NI) bool { + i++ + number[v] = i + lowpt[v] = i + for _, w := range g.LabeledAdjacencyList[v] { + if number[w.To] == 0 { + stack = append(stack, LabeledEdge{Edge{v, w.To}, w.Label}) + if !biconnect(w.To, v) { + return false + } + if lowpt[w.To] < lowpt[v] { + lowpt[v] = lowpt[w.To] + } + if lowpt[w.To] >= number[v] { + var bcc []LabeledEdge + top := len(stack) - 1 + for number[stack[top].N1] >= number[w.To] { + bcc = append(bcc, stack[top]) + stack = stack[:top] + top-- + } + bcc = append(bcc, stack[top]) + stack = stack[:top] + top-- + if !emit(bcc) { + return false + } + } + } else if number[w.To] < number[v] && w.To != u { + stack = append(stack, LabeledEdge{Edge{v, w.To}, w.Label}) + if number[w.To] < lowpt[v] { + lowpt[v] = number[w.To] + } + } + } + return true + } + for w := range g.LabeledAdjacencyList { + if number[w] == 0 && !biconnect(NI(w), 0) { + return + } + } +} diff --git a/vendor/github.com/soniakeys/graph/undir_RO.go b/vendor/github.com/soniakeys/graph/undir_RO.go new file mode 100644 index 00000000..fd8e3778 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/undir_RO.go @@ -0,0 +1,659 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// undir_RO.go is code generated from undir_cg.go by directives in graph.go. +// Editing undir_cg.go is okay. It is the code generation source. +// DO NOT EDIT undir_RO.go. +// The RO means read only and it is upper case RO to slow you down a bit +// in case you start to edit the file. + +// Bipartite determines if a connected component of an undirected graph +// is bipartite, a component where nodes can be partitioned into two sets +// such that every edge in the component goes from one set to the other. +// +// Argument n can be any representative node of the component. 
+// +// If the component is bipartite, Bipartite returns true and a two-coloring +// of the component. Each color set is returned as a bitmap. If the component +// is not bipartite, Bipartite returns false and a representative odd cycle. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) Bipartite(n NI) (b bool, c1, c2 Bits, oc []NI) { + b = true + var open bool + var df func(n NI, c1, c2 *Bits) + df = func(n NI, c1, c2 *Bits) { + c1.SetBit(n, 1) + for _, nb := range g.AdjacencyList[n] { + if c1.Bit(nb) == 1 { + b = false + oc = []NI{nb, n} + open = true + return + } + if c2.Bit(nb) == 1 { + continue + } + df(nb, c2, c1) + if b { + continue + } + switch { + case !open: + case n == oc[0]: + open = false + default: + oc = append(oc, n) + } + return + } + } + df(n, &c1, &c2) + if b { + return b, c1, c2, nil + } + return b, Bits{}, Bits{}, oc +} + +// BronKerbosch1 finds maximal cliques in an undirected graph. +// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. +// +// This method implements the BronKerbosch1 algorithm of WP; that is, +// the original algorithm without improvements. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also more sophisticated variants BronKerbosch2 and BronKerbosch3. +func (g Undirected) BronKerbosch1(emit func([]NI) bool) { + a := g.AdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2 Bits + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to) == 1 { + p2.SetBit(to, 1) + } + if X.Bit(to) == 1 { + x2.SetBit(to, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !P.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + f(&R, &P, &X) +} + +// BKPivotMaxDegree is a strategy for BronKerbosch methods. +// +// To use it, take the method value (see golang.org/ref/spec#Method_values) +// and pass it as the argument to BronKerbosch2 or 3. +// +// The strategy is to pick the node from P or X with the maximum degree +// (number of edges) in g. Note this is a shortcut from evaluating degrees +// in P. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) BKPivotMaxDegree(P, X *Bits) (p NI) { + // choose pivot u as highest degree node from P or X + a := g.AdjacencyList + maxDeg := -1 + P.Iterate(func(n NI) bool { // scan P + if d := len(a[n]); d > maxDeg { + p = n + maxDeg = d + } + return true + }) + X.Iterate(func(n NI) bool { // scan X + if d := len(a[n]); d > maxDeg { + p = n + maxDeg = d + } + return true + }) + return +} + +// BKPivotMinP is a strategy for BronKerbosch methods. +// +// To use it, take the method value (see golang.org/ref/spec#Method_values) +// and pass it as the argument to BronKerbosch2 or 3. +// +// The strategy is to simply pick the first node in P. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g Undirected) BKPivotMinP(P, X *Bits) NI { + return P.From(0) +} + +// BronKerbosch2 finds maximal cliques in an undirected graph. +// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. +// +// This method implements the BronKerbosch2 algorithm of WP; that is, +// the original algorithm plus pivoting. +// +// The argument is a pivot function that must return a node of P or X. +// P is guaranteed to contain at least one node. X is not. +// For example see BKPivotMaxDegree. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also simpler variant BronKerbosch1 and more sophisticated variant +// BronKerbosch3. +func (g Undirected) BronKerbosch2(pivot func(P, X *Bits) NI, emit func([]NI) bool) { + a := g.AdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2, pnu Bits + // compute P \ N(u). next 5 lines are only difference from BK1 + pnu.Set(*P) + for _, to := range a[pivot(P, X)] { + pnu.SetBit(to, 0) + } + // remaining code like BK1 + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to) == 1 { + p2.SetBit(to, 1) + } + if X.Bit(to) == 1 { + x2.SetBit(to, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !pnu.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + f(&R, &P, &X) +} + +// BronKerbosch3 finds maximal cliques in an undirected graph. +// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. +// +// This method implements the BronKerbosch3 algorithm of WP; that is, +// the original algorithm with pivoting and degeneracy ordering. +// +// The argument is a pivot function that must return a node of P or X. +// P is guaranteed to contain at least one node. X is not. +// For example see BKPivotMaxDegree. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also simpler variants BronKerbosch1 and BronKerbosch2. +func (g Undirected) BronKerbosch3(pivot func(P, X *Bits) NI, emit func([]NI) bool) { + a := g.AdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2, pnu Bits + // compute P \ N(u). 
next lines are only difference from BK1 + pnu.Set(*P) + for _, to := range a[pivot(P, X)] { + pnu.SetBit(to, 0) + } + // remaining code like BK2 + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to) == 1 { + p2.SetBit(to, 1) + } + if X.Bit(to) == 1 { + x2.SetBit(to, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !pnu.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + // code above same as BK2 + // code below new to BK3 + _, ord, _ := g.Degeneracy() + var p2, x2 Bits + for _, n := range ord { + R.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to) == 1 { + p2.SetBit(to, 1) + } + if X.Bit(to) == 1 { + x2.SetBit(to, 1) + } + } + if !f(&R, &p2, &x2) { + return + } + R.SetBit(n, 0) + P.SetBit(n, 0) + X.SetBit(n, 1) + } +} + +// ConnectedComponentBits returns a function that iterates over connected +// components of g, returning a member bitmap for each. +// +// Each call of the returned function returns the order (number of nodes) +// and bits of a connected component. The returned function returns zeros +// after returning all connected components. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps, which has lighter weight return values. +func (g Undirected) ConnectedComponentBits() func() (order int, bits Bits) { + a := g.AdjacencyList + var vg Bits // nodes visited in graph + var vc *Bits // nodes visited in current component + var nc int + var df func(NI) + df = func(n NI) { + vg.SetBit(n, 1) + vc.SetBit(n, 1) + nc++ + for _, nb := range a[n] { + if vg.Bit(nb) == 0 { + df(nb) + } + } + return + } + var n NI + return func() (o int, bits Bits) { + for ; n < NI(len(a)); n++ { + if vg.Bit(n) == 0 { + vc = &bits + nc = 0 + df(n) + return nc, bits + } + } + return + } +} + +// ConnectedComponentLists returns a function that iterates over connected +// components of g, returning the member list of each. +// +// Each call of the returned function returns a node list of a connected +// component. The returned function returns nil after returning all connected +// components. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps, which has lighter weight return values. +func (g Undirected) ConnectedComponentLists() func() []NI { + a := g.AdjacencyList + var vg Bits // nodes visited in graph + var m []NI // members of current component + var df func(NI) + df = func(n NI) { + vg.SetBit(n, 1) + m = append(m, n) + for _, nb := range a[n] { + if vg.Bit(nb) == 0 { + df(nb) + } + } + return + } + var n NI + return func() []NI { + for ; n < NI(len(a)); n++ { + if vg.Bit(n) == 0 { + m = nil + df(n) + return m + } + } + return nil + } +} + +// ConnectedComponentReps returns a representative node from each connected +// component of g. +// +// Returned is a slice with a single representative node from each connected +// component and also a parallel slice with the order, or number of nodes, +// in the corresponding component. +// +// This is fairly minimal information describing connected components. +// From a representative node, other nodes in the component can be reached +// by depth first traversal for example. +// +// There are equivalent labeled and unlabeled versions of this method. 
+// +// See also ConnectedComponentBits and ConnectedComponentLists which can +// collect component members in a single traversal, and IsConnected which +// is an even simpler boolean test. +func (g Undirected) ConnectedComponentReps() (reps []NI, orders []int) { + a := g.AdjacencyList + var c Bits + var o int + var df func(NI) + df = func(n NI) { + c.SetBit(n, 1) + o++ + for _, nb := range a[n] { + if c.Bit(nb) == 0 { + df(nb) + } + } + return + } + for n := range a { + if c.Bit(NI(n)) == 0 { + reps = append(reps, NI(n)) + o = 0 + df(NI(n)) + orders = append(orders, o) + } + } + return +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) Copy() (c Undirected, ma int) { + l, s := g.AdjacencyList.Copy() + return Undirected{l}, s +} + +// Degeneracy computes k-degeneracy, vertex ordering and k-cores. +// +// See Wikipedia https://en.wikipedia.org/wiki/Degeneracy_(graph_theory) +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) Degeneracy() (k int, ordering []NI, cores []int) { + a := g.AdjacencyList + // WP algorithm + ordering = make([]NI, len(a)) + var L Bits + d := make([]int, len(a)) + var D [][]NI + for v, nb := range a { + dv := len(nb) + d[v] = dv + for len(D) <= dv { + D = append(D, nil) + } + D[dv] = append(D[dv], NI(v)) + } + for ox := range a { + // find a non-empty D + i := 0 + for len(D[i]) == 0 { + i++ + } + // k is max(i, k) + if i > k { + for len(cores) <= i { + cores = append(cores, 0) + } + cores[k] = ox + k = i + } + // select from D[i] + Di := D[i] + last := len(Di) - 1 + v := Di[last] + // Add v to ordering, remove from Di + ordering[ox] = v + L.SetBit(v, 1) + D[i] = Di[:last] + // move neighbors + for _, nb := range a[v] { + if L.Bit(nb) == 1 { + continue + } + dn := d[nb] // old number of neighbors of nb + Ddn := D[dn] // nb is in this list + // remove it from the list + for wx, w := range Ddn { + if w == nb { + last := len(Ddn) - 1 + Ddn[wx], Ddn[last] = Ddn[last], Ddn[wx] + D[dn] = Ddn[:last] + } + } + dn-- // new number of neighbors + d[nb] = dn + // re--add it to it's new list + D[dn] = append(D[dn], nb) + } + } + cores[k] = len(ordering) + return +} + +// Degree for undirected graphs, returns the degree of a node. +// +// The degree of a node in an undirected graph is the number of incident +// edges, where loops count twice. +// +// If g is known to be loop-free, the result is simply equivalent to len(g[n]). +// See handshaking lemma example at AdjacencyList.ArcSize. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) Degree(n NI) int { + to := g.AdjacencyList[n] + d := len(to) // just "out" degree, + for _, to := range to { + if to == n { + d++ // except loops count twice + } + } + return d +} + +// FromList constructs a FromList representing the tree reachable from +// the given root. +// +// The connected component containing root should represent a simple graph, +// connected as a tree. +// +// For nodes connected as a tree, the Path member of the returned FromList +// will be populated with both From and Len values. The MaxLen member will be +// set but Leaves will be left a zero value. Return value cycle will be -1. +// +// If the connected component containing root is not connected as a tree, +// a cycle will be detected. The returned FromList will be a zero value and +// return value cycle will be a node involved in the cycle. 
+// +// Loops and parallel edges will be detected as cycles, however only in the +// connected component containing root. If g is not fully connected, nodes +// not reachable from root will have PathEnd values of {From: -1, Len: 0}. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) FromList(root NI) (f FromList, cycle NI) { + p := make([]PathEnd, len(g.AdjacencyList)) + for i := range p { + p[i].From = -1 + } + ml := 0 + var df func(NI, NI) bool + df = func(fr, n NI) bool { + l := p[n].Len + 1 + for _, to := range g.AdjacencyList[n] { + if to == fr { + continue + } + if p[to].Len > 0 { + cycle = to + return false + } + p[to] = PathEnd{From: n, Len: l} + if l > ml { + ml = l + } + if !df(n, to) { + return false + } + } + return true + } + p[root].Len = 1 + if !df(-1, root) { + return + } + return FromList{Paths: p, MaxLen: ml}, -1 +} + +// IsConnected tests if an undirected graph is a single connected component. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps for a method returning more information. +func (g Undirected) IsConnected() bool { + a := g.AdjacencyList + if len(a) == 0 { + return true + } + var b Bits + b.SetAll(len(a)) + var df func(NI) + df = func(n NI) { + b.SetBit(n, 0) + for _, to := range a[n] { + if b.Bit(to) == 1 { + df(to) + } + } + } + df(0) + return b.Zero() +} + +// IsTree identifies trees in undirected graphs. +// +// Return value isTree is true if the connected component reachable from root +// is a tree. Further, return value allTree is true if the entire graph g is +// connected. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) IsTree(root NI) (isTree, allTree bool) { + a := g.AdjacencyList + var v Bits + v.SetAll(len(a)) + var df func(NI, NI) bool + df = func(fr, n NI) bool { + if v.Bit(n) == 0 { + return false + } + v.SetBit(n, 0) + for _, to := range a[n] { + if to != fr && !df(n, to) { + return false + } + } + return true + } + v.SetBit(root, 0) + for _, to := range a[root] { + if !df(root, to) { + return false, false + } + } + return true, v.Zero() +} + +// Size returns the number of edges in g. +// +// See also ArcSize and HasLoop. +func (g Undirected) Size() int { + m2 := 0 + for fr, to := range g.AdjacencyList { + m2 += len(to) + for _, to := range to { + if to == NI(fr) { + m2++ + } + } + } + return m2 / 2 +} diff --git a/vendor/github.com/soniakeys/graph/undir_cg.go b/vendor/github.com/soniakeys/graph/undir_cg.go new file mode 100644 index 00000000..35b5b97d --- /dev/null +++ b/vendor/github.com/soniakeys/graph/undir_cg.go @@ -0,0 +1,659 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// undir_RO.go is code generated from undir_cg.go by directives in graph.go. +// Editing undir_cg.go is okay. It is the code generation source. +// DO NOT EDIT undir_RO.go. +// The RO means read only and it is upper case RO to slow you down a bit +// in case you start to edit the file. + +// Bipartite determines if a connected component of an undirected graph +// is bipartite, a component where nodes can be partitioned into two sets +// such that every edge in the component goes from one set to the other. +// +// Argument n can be any representative node of the component. +// +// If the component is bipartite, Bipartite returns true and a two-coloring +// of the component. Each color set is returned as a bitmap. 
If the component +// is not bipartite, Bipartite returns false and a representative odd cycle. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) Bipartite(n NI) (b bool, c1, c2 Bits, oc []NI) { + b = true + var open bool + var df func(n NI, c1, c2 *Bits) + df = func(n NI, c1, c2 *Bits) { + c1.SetBit(n, 1) + for _, nb := range g.LabeledAdjacencyList[n] { + if c1.Bit(nb.To) == 1 { + b = false + oc = []NI{nb.To, n} + open = true + return + } + if c2.Bit(nb.To) == 1 { + continue + } + df(nb.To, c2, c1) + if b { + continue + } + switch { + case !open: + case n == oc[0]: + open = false + default: + oc = append(oc, n) + } + return + } + } + df(n, &c1, &c2) + if b { + return b, c1, c2, nil + } + return b, Bits{}, Bits{}, oc +} + +// BronKerbosch1 finds maximal cliques in an undirected graph. +// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. +// +// This method implements the BronKerbosch1 algorithm of WP; that is, +// the original algorithm without improvements. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also more sophisticated variants BronKerbosch2 and BronKerbosch3. +func (g LabeledUndirected) BronKerbosch1(emit func([]NI) bool) { + a := g.LabeledAdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2 Bits + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to.To) == 1 { + p2.SetBit(to.To, 1) + } + if X.Bit(to.To) == 1 { + x2.SetBit(to.To, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !P.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + f(&R, &P, &X) +} + +// BKPivotMaxDegree is a strategy for BronKerbosch methods. +// +// To use it, take the method value (see golang.org/ref/spec#Method_values) +// and pass it as the argument to BronKerbosch2 or 3. +// +// The strategy is to pick the node from P or X with the maximum degree +// (number of edges) in g. Note this is a shortcut from evaluating degrees +// in P. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) BKPivotMaxDegree(P, X *Bits) (p NI) { + // choose pivot u as highest degree node from P or X + a := g.LabeledAdjacencyList + maxDeg := -1 + P.Iterate(func(n NI) bool { // scan P + if d := len(a[n]); d > maxDeg { + p = n + maxDeg = d + } + return true + }) + X.Iterate(func(n NI) bool { // scan X + if d := len(a[n]); d > maxDeg { + p = n + maxDeg = d + } + return true + }) + return +} + +// BKPivotMinP is a strategy for BronKerbosch methods. +// +// To use it, take the method value (see golang.org/ref/spec#Method_values) +// and pass it as the argument to BronKerbosch2 or 3. +// +// The strategy is to simply pick the first node in P. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) BKPivotMinP(P, X *Bits) NI { + return P.From(0) +} + +// BronKerbosch2 finds maximal cliques in an undirected graph. 
+// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. +// +// This method implements the BronKerbosch2 algorithm of WP; that is, +// the original algorithm plus pivoting. +// +// The argument is a pivot function that must return a node of P or X. +// P is guaranteed to contain at least one node. X is not. +// For example see BKPivotMaxDegree. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also simpler variant BronKerbosch1 and more sophisticated variant +// BronKerbosch3. +func (g LabeledUndirected) BronKerbosch2(pivot func(P, X *Bits) NI, emit func([]NI) bool) { + a := g.LabeledAdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2, pnu Bits + // compute P \ N(u). next 5 lines are only difference from BK1 + pnu.Set(*P) + for _, to := range a[pivot(P, X)] { + pnu.SetBit(to.To, 0) + } + // remaining code like BK1 + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to.To) == 1 { + p2.SetBit(to.To, 1) + } + if X.Bit(to.To) == 1 { + x2.SetBit(to.To, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !pnu.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + f(&R, &P, &X) +} + +// BronKerbosch3 finds maximal cliques in an undirected graph. +// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. +// +// This method implements the BronKerbosch3 algorithm of WP; that is, +// the original algorithm with pivoting and degeneracy ordering. +// +// The argument is a pivot function that must return a node of P or X. +// P is guaranteed to contain at least one node. X is not. +// For example see BKPivotMaxDegree. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also simpler variants BronKerbosch1 and BronKerbosch2. +func (g LabeledUndirected) BronKerbosch3(pivot func(P, X *Bits) NI, emit func([]NI) bool) { + a := g.LabeledAdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2, pnu Bits + // compute P \ N(u). 
next lines are only difference from BK1 + pnu.Set(*P) + for _, to := range a[pivot(P, X)] { + pnu.SetBit(to.To, 0) + } + // remaining code like BK2 + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to.To) == 1 { + p2.SetBit(to.To, 1) + } + if X.Bit(to.To) == 1 { + x2.SetBit(to.To, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !pnu.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + // code above same as BK2 + // code below new to BK3 + _, ord, _ := g.Degeneracy() + var p2, x2 Bits + for _, n := range ord { + R.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to.To) == 1 { + p2.SetBit(to.To, 1) + } + if X.Bit(to.To) == 1 { + x2.SetBit(to.To, 1) + } + } + if !f(&R, &p2, &x2) { + return + } + R.SetBit(n, 0) + P.SetBit(n, 0) + X.SetBit(n, 1) + } +} + +// ConnectedComponentBits returns a function that iterates over connected +// components of g, returning a member bitmap for each. +// +// Each call of the returned function returns the order (number of nodes) +// and bits of a connected component. The returned function returns zeros +// after returning all connected components. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps, which has lighter weight return values. +func (g LabeledUndirected) ConnectedComponentBits() func() (order int, bits Bits) { + a := g.LabeledAdjacencyList + var vg Bits // nodes visited in graph + var vc *Bits // nodes visited in current component + var nc int + var df func(NI) + df = func(n NI) { + vg.SetBit(n, 1) + vc.SetBit(n, 1) + nc++ + for _, nb := range a[n] { + if vg.Bit(nb.To) == 0 { + df(nb.To) + } + } + return + } + var n NI + return func() (o int, bits Bits) { + for ; n < NI(len(a)); n++ { + if vg.Bit(n) == 0 { + vc = &bits + nc = 0 + df(n) + return nc, bits + } + } + return + } +} + +// ConnectedComponentLists returns a function that iterates over connected +// components of g, returning the member list of each. +// +// Each call of the returned function returns a node list of a connected +// component. The returned function returns nil after returning all connected +// components. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps, which has lighter weight return values. +func (g LabeledUndirected) ConnectedComponentLists() func() []NI { + a := g.LabeledAdjacencyList + var vg Bits // nodes visited in graph + var m []NI // members of current component + var df func(NI) + df = func(n NI) { + vg.SetBit(n, 1) + m = append(m, n) + for _, nb := range a[n] { + if vg.Bit(nb.To) == 0 { + df(nb.To) + } + } + return + } + var n NI + return func() []NI { + for ; n < NI(len(a)); n++ { + if vg.Bit(n) == 0 { + m = nil + df(n) + return m + } + } + return nil + } +} + +// ConnectedComponentReps returns a representative node from each connected +// component of g. +// +// Returned is a slice with a single representative node from each connected +// component and also a parallel slice with the order, or number of nodes, +// in the corresponding component. +// +// This is fairly minimal information describing connected components. +// From a representative node, other nodes in the component can be reached +// by depth first traversal for example. 
+// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentBits and ConnectedComponentLists which can +// collect component members in a single traversal, and IsConnected which +// is an even simpler boolean test. +func (g LabeledUndirected) ConnectedComponentReps() (reps []NI, orders []int) { + a := g.LabeledAdjacencyList + var c Bits + var o int + var df func(NI) + df = func(n NI) { + c.SetBit(n, 1) + o++ + for _, nb := range a[n] { + if c.Bit(nb.To) == 0 { + df(nb.To) + } + } + return + } + for n := range a { + if c.Bit(NI(n)) == 0 { + reps = append(reps, NI(n)) + o = 0 + df(NI(n)) + orders = append(orders, o) + } + } + return +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) Copy() (c LabeledUndirected, ma int) { + l, s := g.LabeledAdjacencyList.Copy() + return LabeledUndirected{l}, s +} + +// Degeneracy computes k-degeneracy, vertex ordering and k-cores. +// +// See Wikipedia https://en.wikipedia.org/wiki/Degeneracy_(graph_theory) +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) Degeneracy() (k int, ordering []NI, cores []int) { + a := g.LabeledAdjacencyList + // WP algorithm + ordering = make([]NI, len(a)) + var L Bits + d := make([]int, len(a)) + var D [][]NI + for v, nb := range a { + dv := len(nb) + d[v] = dv + for len(D) <= dv { + D = append(D, nil) + } + D[dv] = append(D[dv], NI(v)) + } + for ox := range a { + // find a non-empty D + i := 0 + for len(D[i]) == 0 { + i++ + } + // k is max(i, k) + if i > k { + for len(cores) <= i { + cores = append(cores, 0) + } + cores[k] = ox + k = i + } + // select from D[i] + Di := D[i] + last := len(Di) - 1 + v := Di[last] + // Add v to ordering, remove from Di + ordering[ox] = v + L.SetBit(v, 1) + D[i] = Di[:last] + // move neighbors + for _, nb := range a[v] { + if L.Bit(nb.To) == 1 { + continue + } + dn := d[nb.To] // old number of neighbors of nb + Ddn := D[dn] // nb is in this list + // remove it from the list + for wx, w := range Ddn { + if w == nb.To { + last := len(Ddn) - 1 + Ddn[wx], Ddn[last] = Ddn[last], Ddn[wx] + D[dn] = Ddn[:last] + } + } + dn-- // new number of neighbors + d[nb.To] = dn + // re--add it to it's new list + D[dn] = append(D[dn], nb.To) + } + } + cores[k] = len(ordering) + return +} + +// Degree for undirected graphs, returns the degree of a node. +// +// The degree of a node in an undirected graph is the number of incident +// edges, where loops count twice. +// +// If g is known to be loop-free, the result is simply equivalent to len(g[n]). +// See handshaking lemma example at AdjacencyList.ArcSize. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) Degree(n NI) int { + to := g.LabeledAdjacencyList[n] + d := len(to) // just "out" degree, + for _, to := range to { + if to.To == n { + d++ // except loops count twice + } + } + return d +} + +// FromList constructs a FromList representing the tree reachable from +// the given root. +// +// The connected component containing root should represent a simple graph, +// connected as a tree. +// +// For nodes connected as a tree, the Path member of the returned FromList +// will be populated with both From and Len values. The MaxLen member will be +// set but Leaves will be left a zero value. Return value cycle will be -1. 
+// +// If the connected component containing root is not connected as a tree, +// a cycle will be detected. The returned FromList will be a zero value and +// return value cycle will be a node involved in the cycle. +// +// Loops and parallel edges will be detected as cycles, however only in the +// connected component containing root. If g is not fully connected, nodes +// not reachable from root will have PathEnd values of {From: -1, Len: 0}. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) FromList(root NI) (f FromList, cycle NI) { + p := make([]PathEnd, len(g.LabeledAdjacencyList)) + for i := range p { + p[i].From = -1 + } + ml := 0 + var df func(NI, NI) bool + df = func(fr, n NI) bool { + l := p[n].Len + 1 + for _, to := range g.LabeledAdjacencyList[n] { + if to.To == fr { + continue + } + if p[to.To].Len > 0 { + cycle = to.To + return false + } + p[to.To] = PathEnd{From: n, Len: l} + if l > ml { + ml = l + } + if !df(n, to.To) { + return false + } + } + return true + } + p[root].Len = 1 + if !df(-1, root) { + return + } + return FromList{Paths: p, MaxLen: ml}, -1 +} + +// IsConnected tests if an undirected graph is a single connected component. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps for a method returning more information. +func (g LabeledUndirected) IsConnected() bool { + a := g.LabeledAdjacencyList + if len(a) == 0 { + return true + } + var b Bits + b.SetAll(len(a)) + var df func(NI) + df = func(n NI) { + b.SetBit(n, 0) + for _, to := range a[n] { + if b.Bit(to.To) == 1 { + df(to.To) + } + } + } + df(0) + return b.Zero() +} + +// IsTree identifies trees in undirected graphs. +// +// Return value isTree is true if the connected component reachable from root +// is a tree. Further, return value allTree is true if the entire graph g is +// connected. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) IsTree(root NI) (isTree, allTree bool) { + a := g.LabeledAdjacencyList + var v Bits + v.SetAll(len(a)) + var df func(NI, NI) bool + df = func(fr, n NI) bool { + if v.Bit(n) == 0 { + return false + } + v.SetBit(n, 0) + for _, to := range a[n] { + if to.To != fr && !df(n, to.To) { + return false + } + } + return true + } + v.SetBit(root, 0) + for _, to := range a[root] { + if !df(root, to.To) { + return false, false + } + } + return true, v.Zero() +} + +// Size returns the number of edges in g. +// +// See also ArcSize and HasLoop. +func (g LabeledUndirected) Size() int { + m2 := 0 + for fr, to := range g.LabeledAdjacencyList { + m2 += len(to) + for _, to := range to { + if to.To == NI(fr) { + m2++ + } + } + } + return m2 / 2 +} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 00000000..56efb95b --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,156 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. 
+// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out <-chan Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. 
+ // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go b/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go new file mode 100644 index 00000000..e3170e33 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package ctxhttp + +import "net/http" + +func canceler(client *http.Client, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go new file mode 100644 index 00000000..56bcbadb --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.5 + +package ctxhttp + +import "net/http" + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +func canceler(client *http.Client, req *http.Request) func() { + rc, ok := client.Transport.(requestCanceler) + if !ok { + return func() {} + } + return func() { + rc.CancelRequest(req) + } +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 00000000..e35860a7 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,145 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. + cancel := canceler(client, req) + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + cancel() + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cancel() + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. 
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 00000000..f8cda19a --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 00000000..5a30acab --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. 
+func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. 
+ return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml new file mode 100644 index 00000000..a035125c --- /dev/null +++ b/vendor/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.3 + - 1.4 + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. 
+# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 00000000..46aa2b12 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 00000000..d02f24fd --- /dev/null +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The oauth2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md new file mode 100644 index 00000000..0d514173 --- /dev/null +++ b/vendor/golang.org/x/oauth2/README.md @@ -0,0 +1,64 @@ +# OAuth2 for Go + +[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) + +oauth2 package contains a client implementation for OAuth 2.0 spec. + +## Installation + +~~~~ +go get golang.org/x/oauth2 +~~~~ + +See godoc for further documentation and examples. + +* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) + + +## App Engine + +In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor +of the [`context.Context`](https://golang.org/x/net/context#Context) type from +the `golang.org/x/net/context` package + +This means its no longer possible to use the "Classic App Engine" +`appengine.Context` type with the `oauth2` package. (You're using +Classic App Engine if you import the package `"appengine"`.) + +To work around this, you may use the new `"google.golang.org/appengine"` +package. This package has almost the same API as the `"appengine"` package, +but it can be fetched with `go get` and used on "Managed VMs" and well as +Classic App Engine. + +See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) +for information on updating your app. + +If you don't want to update your entire app to use the new App Engine packages, +you may use both sets of packages in parallel, using only the new packages +with the `oauth2` package. + + import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + newappengine "google.golang.org/appengine" + newurlfetch "google.golang.org/appengine/urlfetch" + + "appengine" + ) + + func handler(w http.ResponseWriter, r *http.Request) { + var c appengine.Context = appengine.NewContext(r) + c.Infof("Logging a message with the old package") + + var ctx context.Context = newappengine.NewContext(r) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "scope"), + Base: &newurlfetch.Transport{Context: ctx}, + }, + } + client.Get("...") + } + diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go new file mode 100644 index 00000000..8962c49d --- /dev/null +++ b/vendor/golang.org/x/oauth2/client_appengine.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// App Engine hooks. + +package oauth2 + +import ( + "net/http" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" + "google.golang.org/appengine/urlfetch" +) + +func init() { + internal.RegisterContextClientFunc(contextClientAppEngine) +} + +func contextClientAppEngine(ctx context.Context) (*http.Client, error) { + return urlfetch.Client(ctx), nil +} diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 00000000..fbe1028d --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "bufio" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "strings" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. +func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} + +func ParseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": map[string]string{}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +func CondVal(v string) []string { + if v == "" { + return nil + } + return []string{v} +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go new file mode 100644 index 00000000..a6ed3cc7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,225 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" +) + +// Token represents the crendentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. 
+ // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form. +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://api.dropbox.com/", + "https://api.dropboxapi.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", + "https://api.soundcloud.com/", + "https://api.twitch.tv/", + "https://app.box.com/", + "https://connect.stripe.com/", + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + "https://slack.com/", + "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", + "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", +} + +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts either it in URL param or Auth header, but not both. +// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. 
+ return true +} + +func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + v.Set("client_id", ClientID) + bustedAuth := !providerAuthHeaderWorks(TokenURL) + if bustedAuth && ClientSecret != "" { + v.Set("client_secret", ClientSecret) + } + req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(ClientID, ClientSecret) + } + r, err := hc.Do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 00000000..f1f173e3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. 
If it returns (nil, nil), the search continues +// down the list of registered funcs. +type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 00000000..9b7b977d --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,337 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. 
+type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. + Token() (*Token, error) +} + +// Endpoint contains the OAuth 2.0 provider's authorization and token +// endpoint URLs. +type Endpoint struct { + AuthURL string + TokenURL string +} + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Options.AuthCodeURL method. They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") + + // ApprovalForce forces the users to view the consent dialog + // and confirm the permissions request at the URL returned + // from AuthCodeURL, even if they've already done so. + ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") +) + +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + +type setParam struct{ k, v string } + +func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } + +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. +func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} +} + +// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page +// that asks for permissions for the required scopes explicitly. +// +// State is a token to protect the user from CSRF attacks. You must +// always provide a non-zero string and validate that it matches the +// the state query parameter on your redirect callback. +// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. +func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + "state": internal.CondVal(state), + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. 
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. 
+func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. +// The returned client is not valid beyond the lifetime of the context. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + c, err := internal.ContextClient(ctx) + if err != nil { + return &http.Client{Transport: internal.ErrorTransport{err}} + } + return c + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextTransport(ctx), + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. +// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. This also +// means it's always safe to wrap ReuseTokenSource around any other +// TokenSource without adverse effects. +func ReuseTokenSource(t *Token, src TokenSource) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go new file mode 100644 index 00000000..7a3167f1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/token.go @@ -0,0 +1,158 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// expiryDelta determines how earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late +// expirations due to client-server time mismatches. +const expiryDelta = 10 * time.Second + +// Token represents the crendentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. 
+// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". +func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Add(-expiryDelta).Before(time.Now()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. 
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along +// with an error.. +func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) + if err != nil { + return nil, err + } + return tokenFromInternal(tk), nil +} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go new file mode 100644 index 00000000..92ac7e25 --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport.go @@ -0,0 +1,132 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "io" + "net/http" + "sync" +) + +// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, +// wrapping a base RoundTripper and adding an Authorization header +// with a token from the supplied Sources. +// +// Transport is a low-level mechanism. Most code will use the +// higher-level Config.Client method instead. +type Transport struct { + // Source supplies the token to add to outgoing requests' + // Authorization headers. + Source TokenSource + + // Base is the base RoundTripper used to make HTTP requests. + // If nil, http.DefaultTransport is used. + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) 
+ } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE new file mode 100644 index 00000000..263aa7a0 --- /dev/null +++ b/vendor/google.golang.org/api/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/google.golang.org/api/drive/v3/drive-api.json b/vendor/google.golang.org/api/drive/v3/drive-api.json new file mode 100644 index 00000000..896d44e3 --- /dev/null +++ b/vendor/google.golang.org/api/drive/v3/drive-api.json @@ -0,0 +1,2410 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/O9_NbpoVnW5GMGl7qWBIajcyrt8\"", + "discoveryVersion": "v1", + "id": "drive:v3", + "name": "drive", + "version": "v3", + "revision": "20160303", + "title": "Drive API", + "description": "The API to interact with Drive.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png", + "x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png" + }, + "documentationLink": "https://developers.google.com/drive/", + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/drive/v3/", + "basePath": "/drive/v3/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "drive/v3/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/drive": { + "description": "View and manage the files in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.appdata": { + "description": "View and manage its own configuration data in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.file": { + "description": "View and manage Google Drive files and folders that you have opened or created with this app" + }, + "https://www.googleapis.com/auth/drive.metadata": { + "description": "View and manage metadata of files in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.metadata.readonly": { + "description": "View metadata for files in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.photos.readonly": { + "description": "View the photos, videos and albums in your Google Photos" + }, + "https://www.googleapis.com/auth/drive.readonly": { + "description": "View the files in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.scripts": { + "description": "Modify your Google Apps Script scripts' behavior" + } + } + } + }, + "schemas": { + "About": { + "id": "About", + "type": "object", + "description": "Information about the user, the user's Drive, and system capabilities.", + "properties": { + "appInstalled": { + "type": "boolean", + "description": "Whether the user has installed the requesting app." + }, + "exportFormats": { + "type": "object", + "description": "A map of source MIME type to possible targets for all supported exports.", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "folderColorPalette": { + "type": "array", + "description": "The currently supported folder colors as RGB hex strings.", + "items": { + "type": "string" + } + }, + "importFormats": { + "type": "object", + "description": "A map of source MIME type to possible targets for all supported imports.", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "kind": { + "type": "string", + "description": "This is always drive#about.", + "default": "drive#about" + }, + "maxImportSizes": { + "type": "object", + "description": "A map of maximum import sizes by MIME type, in bytes.", + "additionalProperties": { + "type": "string", + "format": "int64" + } + }, + "maxUploadSize": { + "type": "string", + "description": "The maximum upload size in bytes.", + "format": "int64" + }, + "storageQuota": { + "type": "object", + "description": "The user's storage quota limits and usage. All fields are measured in bytes.", + "properties": { + "limit": { + "type": "string", + "description": "The usage limit, if applicable. This will not be present if the user has unlimited storage.", + "format": "int64" + }, + "usage": { + "type": "string", + "description": "The total usage across all services.", + "format": "int64" + }, + "usageInDrive": { + "type": "string", + "description": "The usage by all files in Google Drive.", + "format": "int64" + }, + "usageInDriveTrash": { + "type": "string", + "description": "The usage by trashed files in Google Drive.", + "format": "int64" + } + } + }, + "user": { + "$ref": "User", + "description": "The authenticated user." + } + } + }, + "Change": { + "id": "Change", + "type": "object", + "description": "A change to a file.", + "properties": { + "file": { + "$ref": "File", + "description": "The updated state of the file. 
Present if the file has not been removed." + }, + "fileId": { + "type": "string", + "description": "The ID of the file which has changed." + }, + "kind": { + "type": "string", + "description": "This is always drive#change.", + "default": "drive#change" + }, + "removed": { + "type": "boolean", + "description": "Whether the file has been removed from the view of the changes list, for example by deletion or lost access." + }, + "time": { + "type": "string", + "description": "The time of this change (RFC 3339 date-time).", + "format": "date-time" + } + } + }, + "ChangeList": { + "id": "ChangeList", + "type": "object", + "description": "A list of changes for a user.", + "properties": { + "changes": { + "type": "array", + "description": "The page of changes.", + "items": { + "$ref": "Change" + } + }, + "kind": { + "type": "string", + "description": "This is always drive#changeList.", + "default": "drive#changeList" + }, + "newStartPageToken": { + "type": "string", + "description": "The starting page token for future changes. This will be present only if the end of the current changes list has been reached." + }, + "nextPageToken": { + "type": "string", + "description": "The page token for the next page of changes. This will be absent if the end of the current changes list has been reached." + } + } + }, + "Channel": { + "id": "Channel", + "type": "object", + "description": "An notification channel used to watch for resource changes.", + "properties": { + "address": { + "type": "string", + "description": "The address where notifications are delivered for this channel." + }, + "expiration": { + "type": "string", + "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "A UUID or similar unique string that identifies this channel." + }, + "kind": { + "type": "string", + "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".", + "default": "api#channel" + }, + "params": { + "type": "object", + "description": "Additional parameters controlling delivery channel behavior. Optional.", + "additionalProperties": { + "type": "string", + "description": "Declares a new parameter by name." + } + }, + "payload": { + "type": "boolean", + "description": "A Boolean value to indicate whether payload is wanted. Optional." + }, + "resourceId": { + "type": "string", + "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions." + }, + "resourceUri": { + "type": "string", + "description": "A version-specific identifier for the watched resource." + }, + "token": { + "type": "string", + "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional." + }, + "type": { + "type": "string", + "description": "The type of delivery mechanism used for this channel." + } + } + }, + "Comment": { + "id": "Comment", + "type": "object", + "description": "A comment on a file.", + "properties": { + "anchor": { + "type": "string", + "description": "A region of the document represented as a JSON string. See anchor documentation for details on how to define and interpret anchor properties." + }, + "author": { + "$ref": "User", + "description": "The user who created the comment." + }, + "content": { + "type": "string", + "description": "The plain text content of the comment. 
This field is used for setting the content, while htmlContent should be displayed.", + "annotations": { + "required": [ + "drive.comments.create", + "drive.comments.update" + ] + } + }, + "createdTime": { + "type": "string", + "description": "The time at which the comment was created (RFC 3339 date-time).", + "format": "date-time" + }, + "deleted": { + "type": "boolean", + "description": "Whether the comment has been deleted. A deleted comment has no content." + }, + "htmlContent": { + "type": "string", + "description": "The content of the comment with HTML formatting." + }, + "id": { + "type": "string", + "description": "The ID of the comment." + }, + "kind": { + "type": "string", + "description": "This is always drive#comment.", + "default": "drive#comment" + }, + "modifiedTime": { + "type": "string", + "description": "The last time the comment or any of its replies was modified (RFC 3339 date-time).", + "format": "date-time" + }, + "quotedFileContent": { + "type": "object", + "description": "The file content to which the comment refers, typically within the anchor region. For a text file, for example, this would be the text at the location of the comment.", + "properties": { + "mimeType": { + "type": "string", + "description": "The MIME type of the quoted content." + }, + "value": { + "type": "string", + "description": "The quoted content itself. This is interpreted as plain text if set through the API." + } + } + }, + "replies": { + "type": "array", + "description": "The full list of replies to the comment in chronological order.", + "items": { + "$ref": "Reply" + } + }, + "resolved": { + "type": "boolean", + "description": "Whether the comment has been resolved by one of its replies." + } + } + }, + "CommentList": { + "id": "CommentList", + "type": "object", + "description": "A list of comments on a file.", + "properties": { + "comments": { + "type": "array", + "description": "The page of comments.", + "items": { + "$ref": "Comment" + } + }, + "kind": { + "type": "string", + "description": "This is always drive#commentList.", + "default": "drive#commentList" + }, + "nextPageToken": { + "type": "string", + "description": "The page token for the next page of comments. This will be absent if the end of the comments list has been reached." + } + } + }, + "File": { + "id": "File", + "type": "object", + "description": "The metadata for a file.", + "properties": { + "appProperties": { + "type": "object", + "description": "A collection of arbitrary key-value pairs which are private to the requesting app.\nEntries with null values are cleared in update and copy requests.", + "additionalProperties": { + "type": "string" + } + }, + "capabilities": { + "type": "object", + "description": "Capabilities the current user has on the file.", + "properties": { + "canComment": { + "type": "boolean", + "description": "Whether the user can comment on the file." + }, + "canCopy": { + "type": "boolean", + "description": "Whether the user can copy the file." + }, + "canEdit": { + "type": "boolean", + "description": "Whether the user can edit the file's content." + }, + "canReadRevisions": { + "type": "boolean", + "description": "Whether the current user has read access to the Revisions resource of the file." + }, + "canShare": { + "type": "boolean", + "description": "Whether the user can modify the file's permissions and sharing settings." + } + } + }, + "contentHints": { + "type": "object", + "description": "Additional information about the content of the file. 
These fields are never populated in responses.", + "properties": { + "indexableText": { + "type": "string", + "description": "Text to be indexed for the file to improve fullText queries. This is limited to 128KB in length and may contain HTML elements." + }, + "thumbnail": { + "type": "object", + "description": "A thumbnail for the file. This will only be used if Drive cannot generate a standard thumbnail.", + "properties": { + "image": { + "type": "string", + "description": "The thumbnail data encoded with URL-safe Base64 (RFC 4648 section 5).", + "format": "byte" + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the thumbnail." + } + } + } + } + }, + "createdTime": { + "type": "string", + "description": "The time at which the file was created (RFC 3339 date-time).", + "format": "date-time" + }, + "description": { + "type": "string", + "description": "A short description of the file." + }, + "explicitlyTrashed": { + "type": "boolean", + "description": "Whether the file has been explicitly trashed, as opposed to recursively trashed from a parent folder." + }, + "fileExtension": { + "type": "string", + "description": "The final component of fullFileExtension. This is only available for files with binary content in Drive." + }, + "folderColorRgb": { + "type": "string", + "description": "The color for a folder as an RGB hex string. The supported colors are published in the folderColorPalette field of the About resource.\nIf an unsupported color is specified, the closest color in the palette will be used instead." + }, + "fullFileExtension": { + "type": "string", + "description": "The full file extension extracted from the name field. May contain multiple concatenated extensions, such as \"tar.gz\". This is only available for files with binary content in Drive.\nThis is automatically updated when the name field changes, however it is not cleared if the new name does not contain a valid extension." + }, + "headRevisionId": { + "type": "string", + "description": "The ID of the file's head revision. This is currently only available for files with binary content in Drive." + }, + "iconLink": { + "type": "string", + "description": "A static, unauthenticated link to the file's icon." + }, + "id": { + "type": "string", + "description": "The ID of the file." + }, + "imageMediaMetadata": { + "type": "object", + "description": "Additional metadata about image media, if available.", + "properties": { + "aperture": { + "type": "number", + "description": "The aperture used to create the photo (f-number).", + "format": "float" + }, + "cameraMake": { + "type": "string", + "description": "The make of the camera used to create the photo." + }, + "cameraModel": { + "type": "string", + "description": "The model of the camera used to create the photo." + }, + "colorSpace": { + "type": "string", + "description": "The color space of the photo." + }, + "exposureBias": { + "type": "number", + "description": "The exposure bias of the photo (APEX value).", + "format": "float" + }, + "exposureMode": { + "type": "string", + "description": "The exposure mode used to create the photo." + }, + "exposureTime": { + "type": "number", + "description": "The length of the exposure, in seconds.", + "format": "float" + }, + "flashUsed": { + "type": "boolean", + "description": "Whether a flash was used to create the photo." 
+ }, + "focalLength": { + "type": "number", + "description": "The focal length used to create the photo, in millimeters.", + "format": "float" + }, + "height": { + "type": "integer", + "description": "The height of the image in pixels.", + "format": "int32" + }, + "isoSpeed": { + "type": "integer", + "description": "The ISO speed used to create the photo.", + "format": "int32" + }, + "lens": { + "type": "string", + "description": "The lens used to create the photo." + }, + "location": { + "type": "object", + "description": "Geographic location information stored in the image.", + "properties": { + "altitude": { + "type": "number", + "description": "The altitude stored in the image.", + "format": "double" + }, + "latitude": { + "type": "number", + "description": "The latitude stored in the image.", + "format": "double" + }, + "longitude": { + "type": "number", + "description": "The longitude stored in the image.", + "format": "double" + } + } + }, + "maxApertureValue": { + "type": "number", + "description": "The smallest f-number of the lens at the focal length used to create the photo (APEX value).", + "format": "float" + }, + "meteringMode": { + "type": "string", + "description": "The metering mode used to create the photo." + }, + "rotation": { + "type": "integer", + "description": "The rotation in clockwise degrees from the image's original orientation.", + "format": "int32" + }, + "sensor": { + "type": "string", + "description": "The type of sensor used to create the photo." + }, + "subjectDistance": { + "type": "integer", + "description": "The distance to the subject of the photo, in meters.", + "format": "int32" + }, + "time": { + "type": "string", + "description": "The date and time the photo was taken (EXIF DateTime)." + }, + "whiteBalance": { + "type": "string", + "description": "The white balance mode used to create the photo." + }, + "width": { + "type": "integer", + "description": "The width of the image in pixels.", + "format": "int32" + } + } + }, + "kind": { + "type": "string", + "description": "This is always drive#file.", + "default": "drive#file" + }, + "lastModifyingUser": { + "$ref": "User", + "description": "The last user to modify the file." + }, + "md5Checksum": { + "type": "string", + "description": "The MD5 checksum for the content of the file. This is only applicable to files with binary content in Drive." + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the file.\nDrive will attempt to automatically detect an appropriate value from uploaded content if no value is provided. The value cannot be changed unless a new revision is uploaded.\nIf a file is created with a Google Doc MIME type, the uploaded content will be imported if possible. The supported import formats are published in the About resource." + }, + "modifiedByMeTime": { + "type": "string", + "description": "The last time the file was modified by the user (RFC 3339 date-time).", + "format": "date-time" + }, + "modifiedTime": { + "type": "string", + "description": "The last time the file was modified by anyone (RFC 3339 date-time).\nNote that setting modifiedTime will also update modifiedByMeTime for the user.", + "format": "date-time" + }, + "name": { + "type": "string", + "description": "The name of the file. This is not necessarily unique within a folder." + }, + "originalFilename": { + "type": "string", + "description": "The original filename of the uploaded content if available, or else the original value of the name field. 
This is only available for files with binary content in Drive." + }, + "ownedByMe": { + "type": "boolean", + "description": "Whether the user owns the file." + }, + "owners": { + "type": "array", + "description": "The owners of the file. Currently, only certain legacy files may have more than one owner.", + "items": { + "$ref": "User" + } + }, + "parents": { + "type": "array", + "description": "The IDs of the parent folders which contain the file.\nIf not specified as part of a create request, the file will be placed directly in the My Drive folder. Update requests must use the addParents and removeParents parameters to modify the values.", + "items": { + "type": "string" + } + }, + "permissions": { + "type": "array", + "description": "The full list of permissions for the file. This is only available if the requesting user can share the file.", + "items": { + "$ref": "Permission" + } + }, + "properties": { + "type": "object", + "description": "A collection of arbitrary key-value pairs which are visible to all apps.\nEntries with null values are cleared in update and copy requests.", + "additionalProperties": { + "type": "string" + } + }, + "quotaBytesUsed": { + "type": "string", + "description": "The number of storage quota bytes used by the file. This includes the head revision as well as previous revisions with keepForever enabled.", + "format": "int64" + }, + "shared": { + "type": "boolean", + "description": "Whether the file has been shared." + }, + "sharedWithMeTime": { + "type": "string", + "description": "The time at which the file was shared with the user, if applicable (RFC 3339 date-time).", + "format": "date-time" + }, + "sharingUser": { + "$ref": "User", + "description": "The user who shared the file with the requesting user, if applicable." + }, + "size": { + "type": "string", + "description": "The size of the file's content in bytes. This is only applicable to files with binary content in Drive.", + "format": "int64" + }, + "spaces": { + "type": "array", + "description": "The list of spaces which contain the file. The currently supported values are 'drive', 'appDataFolder' and 'photos'.", + "items": { + "type": "string" + } + }, + "starred": { + "type": "boolean", + "description": "Whether the user has starred the file." + }, + "thumbnailLink": { + "type": "string", + "description": "A short-lived link to the file's thumbnail, if available. Typically lasts on the order of hours." + }, + "trashed": { + "type": "boolean", + "description": "Whether the file has been trashed, either explicitly or from a trashed parent folder. Only the owner may trash a file, and other users cannot see files in the owner's trash." + }, + "version": { + "type": "string", + "description": "A monotonically increasing version number for the file. This reflects every change made to the file on the server, even those not visible to the user.", + "format": "int64" + }, + "videoMediaMetadata": { + "type": "object", + "description": "Additional metadata about video media. This may not be available immediately upon upload.", + "properties": { + "durationMillis": { + "type": "string", + "description": "The duration of the video in milliseconds.", + "format": "int64" + }, + "height": { + "type": "integer", + "description": "The height of the video in pixels.", + "format": "int32" + }, + "width": { + "type": "integer", + "description": "The width of the video in pixels.", + "format": "int32" + } + } + }, + "viewedByMe": { + "type": "boolean", + "description": "Whether the file has been viewed by this user." 
+ }, + "viewedByMeTime": { + "type": "string", + "description": "The last time the file was viewed by the user (RFC 3339 date-time).", + "format": "date-time" + }, + "viewersCanCopyContent": { + "type": "boolean", + "description": "Whether users with only reader or commenter permission can copy the file's content. This affects copy, download, and print operations." + }, + "webContentLink": { + "type": "string", + "description": "A link for downloading the content of the file in a browser. This is only available for files with binary content in Drive." + }, + "webViewLink": { + "type": "string", + "description": "A link for opening the file in a relevant Google editor or viewer in a browser." + }, + "writersCanShare": { + "type": "boolean", + "description": "Whether users with only writer permission can modify the file's permissions." + } + } + }, + "FileList": { + "id": "FileList", + "type": "object", + "description": "A list of files.", + "properties": { + "files": { + "type": "array", + "description": "The page of files.", + "items": { + "$ref": "File" + } + }, + "kind": { + "type": "string", + "description": "This is always drive#fileList.", + "default": "drive#fileList" + }, + "nextPageToken": { + "type": "string", + "description": "The page token for the next page of files. This will be absent if the end of the files list has been reached." + } + } + }, + "GeneratedIds": { + "id": "GeneratedIds", + "type": "object", + "description": "A list of generated file IDs which can be provided in create requests.", + "properties": { + "ids": { + "type": "array", + "description": "The IDs generated for the requesting user in the specified space.", + "items": { + "type": "string" + } + }, + "kind": { + "type": "string", + "description": "This is always drive#generatedIds", + "default": "drive#generatedIds" + }, + "space": { + "type": "string", + "description": "The type of file that can be created with these IDs." + } + } + }, + "Permission": { + "id": "Permission", + "type": "object", + "description": "A permission for a file. A permission grants a user, group, domain or the world access to a file or a folder hierarchy.", + "properties": { + "allowFileDiscovery": { + "type": "boolean", + "description": "Whether the permission allows the file to be discovered through search. This is only applicable for permissions of type domain or anyone." + }, + "displayName": { + "type": "string", + "description": "A displayable name for users, groups or domains." + }, + "domain": { + "type": "string", + "description": "The domain to which this permission refers." + }, + "emailAddress": { + "type": "string", + "description": "The email address of the user or group to which this permission refers." + }, + "id": { + "type": "string", + "description": "The ID of this permission. This is a unique identifier for the grantee, and is published in User resources as permissionId." + }, + "kind": { + "type": "string", + "description": "This is always drive#permission.", + "default": "drive#permission" + }, + "photoLink": { + "type": "string", + "description": "A link to the user's profile photo, if available." + }, + "role": { + "type": "string", + "description": "The role granted by this permission. Valid values are: \n- owner \n- writer \n- commenter \n- reader", + "annotations": { + "required": [ + "drive.permissions.create" + ] + } + }, + "type": { + "type": "string", + "description": "The type of the grantee. 
Valid values are: \n- user \n- group \n- domain \n- anyone", + "annotations": { + "required": [ + "drive.permissions.create" + ] + } + } + } + }, + "PermissionList": { + "id": "PermissionList", + "type": "object", + "description": "A list of permissions for a file.", + "properties": { + "kind": { + "type": "string", + "description": "This is always drive#permissionList.", + "default": "drive#permissionList" + }, + "permissions": { + "type": "array", + "description": "The full list of permissions.", + "items": { + "$ref": "Permission" + } + } + } + }, + "Reply": { + "id": "Reply", + "type": "object", + "description": "A reply to a comment on a file.", + "properties": { + "action": { + "type": "string", + "description": "The action the reply performed to the parent comment. Valid values are: \n- resolve \n- reopen" + }, + "author": { + "$ref": "User", + "description": "The user who created the reply." + }, + "content": { + "type": "string", + "description": "The plain text content of the reply. This field is used for setting the content, while htmlContent should be displayed. This is required on creates if no action is specified.", + "annotations": { + "required": [ + "drive.replies.update" + ] + } + }, + "createdTime": { + "type": "string", + "description": "The time at which the reply was created (RFC 3339 date-time).", + "format": "date-time" + }, + "deleted": { + "type": "boolean", + "description": "Whether the reply has been deleted. A deleted reply has no content." + }, + "htmlContent": { + "type": "string", + "description": "The content of the reply with HTML formatting." + }, + "id": { + "type": "string", + "description": "The ID of the reply." + }, + "kind": { + "type": "string", + "description": "This is always drive#reply.", + "default": "drive#reply" + }, + "modifiedTime": { + "type": "string", + "description": "The last time the reply was modified (RFC 3339 date-time).", + "format": "date-time" + } + } + }, + "ReplyList": { + "id": "ReplyList", + "type": "object", + "description": "A list of replies to a comment on a file.", + "properties": { + "kind": { + "type": "string", + "description": "This is always drive#replyList.", + "default": "drive#replyList" + }, + "nextPageToken": { + "type": "string", + "description": "The page token for the next page of replies. This will be absent if the end of the replies list has been reached." + }, + "replies": { + "type": "array", + "description": "The page of replies.", + "items": { + "$ref": "Reply" + } + } + } + }, + "Revision": { + "id": "Revision", + "type": "object", + "description": "The metadata for a revision to a file.", + "properties": { + "id": { + "type": "string", + "description": "The ID of the revision." + }, + "keepForever": { + "type": "boolean", + "description": "Whether to keep this revision forever, even if it is no longer the head revision. If not set, the revision will be automatically purged 30 days after newer content is uploaded. This can be set on a maximum of 200 revisions for a file.\nThis field is only applicable to files with binary content in Drive." + }, + "kind": { + "type": "string", + "description": "This is always drive#revision.", + "default": "drive#revision" + }, + "lastModifyingUser": { + "$ref": "User", + "description": "The last user to modify this revision." + }, + "md5Checksum": { + "type": "string", + "description": "The MD5 checksum of the revision's content. This is only applicable to files with binary content in Drive." 
+ }, + "mimeType": { + "type": "string", + "description": "The MIME type of the revision." + }, + "modifiedTime": { + "type": "string", + "description": "The last time the revision was modified (RFC 3339 date-time).", + "format": "date-time" + }, + "originalFilename": { + "type": "string", + "description": "The original filename used to create this revision. This is only applicable to files with binary content in Drive." + }, + "publishAuto": { + "type": "boolean", + "description": "Whether subsequent revisions will be automatically republished. This is only applicable to Google Docs." + }, + "published": { + "type": "boolean", + "description": "Whether this revision is published. This is only applicable to Google Docs." + }, + "publishedOutsideDomain": { + "type": "boolean", + "description": "Whether this revision is published outside the domain. This is only applicable to Google Docs." + }, + "size": { + "type": "string", + "description": "The size of the revision's content in bytes. This is only applicable to files with binary content in Drive.", + "format": "int64" + } + } + }, + "RevisionList": { + "id": "RevisionList", + "type": "object", + "description": "A list of revisions of a file.", + "properties": { + "kind": { + "type": "string", + "description": "This is always drive#revisionList.", + "default": "drive#revisionList" + }, + "revisions": { + "type": "array", + "description": "The full list of revisions.", + "items": { + "$ref": "Revision" + } + } + } + }, + "StartPageToken": { + "id": "StartPageToken", + "type": "object", + "properties": { + "kind": { + "type": "string", + "description": "This is always drive#startPageToken.", + "default": "drive#startPageToken" + }, + "startPageToken": { + "type": "string", + "description": "The starting page token for listing changes." + } + } + }, + "User": { + "id": "User", + "type": "object", + "description": "Information about a Drive user.", + "properties": { + "displayName": { + "type": "string", + "description": "A plain text displayable name for this user." + }, + "emailAddress": { + "type": "string", + "description": "The email address of the user. This may not be present in certain contexts if the user has not made their email address visible to the requester." + }, + "kind": { + "type": "string", + "description": "This is always drive#user.", + "default": "drive#user" + }, + "me": { + "type": "boolean", + "description": "Whether this user is the requesting user." + }, + "permissionId": { + "type": "string", + "description": "The user's ID as visible in Permission resources." + }, + "photoLink": { + "type": "string", + "description": "A link to the user's profile photo, if available." 
+ } + } + } + }, + "resources": { + "about": { + "methods": { + "get": { + "id": "drive.about.get", + "path": "about", + "httpMethod": "GET", + "description": "Gets information about the user, the user's Drive, and system capabilities.", + "response": { + "$ref": "About" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + } + } + }, + "changes": { + "methods": { + "getStartPageToken": { + "id": "drive.changes.getStartPageToken", + "path": "changes/startPageToken", + "httpMethod": "GET", + "description": "Gets the starting pageToken for listing future changes.", + "response": { + "$ref": "StartPageToken" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "id": "drive.changes.list", + "path": "changes", + "httpMethod": "GET", + "description": "Lists changes for a user.", + "parameters": { + "includeRemoved": { + "type": "boolean", + "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", + "default": "true", + "location": "query" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of changes to return per page.", + "default": "100", + "format": "int32", + "minimum": "1", + "maximum": "1000", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", + "required": true, + "location": "query" + }, + "restrictToMyDrive": { + "type": "boolean", + "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", + "default": "false", + "location": "query" + }, + "spaces": { + "type": "string", + "description": "A comma-separated list of spaces to query within the user corpus. 
Supported values are 'drive', 'appDataFolder' and 'photos'.", + "default": "drive", + "location": "query" + } + }, + "parameterOrder": [ + "pageToken" + ], + "response": { + "$ref": "ChangeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsSubscription": true + }, + "watch": { + "id": "drive.changes.watch", + "path": "changes/watch", + "httpMethod": "POST", + "description": "Subscribes to changes for a user.", + "parameters": { + "includeRemoved": { + "type": "boolean", + "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", + "default": "true", + "location": "query" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of changes to return per page.", + "default": "100", + "format": "int32", + "minimum": "1", + "maximum": "1000", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", + "required": true, + "location": "query" + }, + "restrictToMyDrive": { + "type": "boolean", + "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", + "default": "false", + "location": "query" + }, + "spaces": { + "type": "string", + "description": "A comma-separated list of spaces to query within the user corpus. 
Supported values are 'drive', 'appDataFolder' and 'photos'.", + "default": "drive", + "location": "query" + } + }, + "parameterOrder": [ + "pageToken" + ], + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsSubscription": true + } + } + }, + "channels": { + "methods": { + "stop": { + "id": "drive.channels.stop", + "path": "channels/stop", + "httpMethod": "POST", + "description": "Stop watching resources through this channel", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + } + } + }, + "comments": { + "methods": { + "create": { + "id": "drive.comments.create", + "path": "files/{fileId}/comments", + "httpMethod": "POST", + "description": "Creates a new comment on a file.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "request": { + "$ref": "Comment" + }, + "response": { + "$ref": "Comment" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "delete": { + "id": "drive.comments.delete", + "path": "files/{fileId}/comments/{commentId}", + "httpMethod": "DELETE", + "description": "Deletes a comment.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "id": "drive.comments.get", + "path": "files/{fileId}/comments/{commentId}", + "httpMethod": "GET", + "description": "Gets a comment by ID.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "includeDeleted": { + "type": "boolean", + "description": "Whether to return deleted comments. 
Deleted comments will not include their original content.", + "default": "false", + "location": "query" + } + }, + "parameterOrder": [ + "fileId", + "commentId" + ], + "response": { + "$ref": "Comment" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "id": "drive.comments.list", + "path": "files/{fileId}/comments", + "httpMethod": "GET", + "description": "Lists a file's comments.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "includeDeleted": { + "type": "boolean", + "description": "Whether to include deleted comments. Deleted comments will not include their original content.", + "default": "false", + "location": "query" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of comments to return per page.", + "default": "20", + "format": "int32", + "minimum": "1", + "maximum": "100", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", + "location": "query" + }, + "startModifiedTime": { + "type": "string", + "description": "The minimum value of 'modifiedTime' for the result comments (RFC 3339 date-time).", + "location": "query" + } + }, + "parameterOrder": [ + "fileId" + ], + "response": { + "$ref": "CommentList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "id": "drive.comments.update", + "path": "files/{fileId}/comments/{commentId}", + "httpMethod": "PATCH", + "description": "Updates a comment with patch semantics.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId" + ], + "request": { + "$ref": "Comment" + }, + "response": { + "$ref": "Comment" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + } + } + }, + "files": { + "methods": { + "copy": { + "id": "drive.files.copy", + "path": "files/{fileId}/copy", + "httpMethod": "POST", + "description": "Creates a copy of a file and applies any requested updates with patch semantics.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "ignoreDefaultVisibility": { + "type": "boolean", + "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.", + "default": "false", + "location": "query" + }, + "keepRevisionForever": { + "type": "boolean", + "description": "Whether to set the 'keepForever' field in the new head revision. 
This is only applicable to files with binary content in Drive.", + "default": "false", + "location": "query" + }, + "ocrLanguage": { + "type": "string", + "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + "location": "query" + } + }, + "parameterOrder": [ + "fileId" + ], + "request": { + "$ref": "File" + }, + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.photos.readonly" + ] + }, + "create": { + "id": "drive.files.create", + "path": "files", + "httpMethod": "POST", + "description": "Creates a new file.", + "parameters": { + "ignoreDefaultVisibility": { + "type": "boolean", + "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.", + "default": "false", + "location": "query" + }, + "keepRevisionForever": { + "type": "boolean", + "description": "Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive.", + "default": "false", + "location": "query" + }, + "ocrLanguage": { + "type": "string", + "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + "location": "query" + }, + "useContentAsIndexableText": { + "type": "boolean", + "description": "Whether to use the uploaded content as indexable text.", + "default": "false", + "location": "query" + } + }, + "request": { + "$ref": "File" + }, + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ], + "supportsMediaUpload": true, + "mediaUpload": { + "accept": [ + "*/*" + ], + "maxSize": "5120GB", + "protocols": { + "simple": { + "multipart": true, + "path": "/upload/drive/v3/files" + }, + "resumable": { + "multipart": true, + "path": "/resumable/upload/drive/v3/files" + } + } + }, + "supportsSubscription": true + }, + "delete": { + "id": "drive.files.delete", + "path": "files/{fileId}", + "httpMethod": "DELETE", + "description": "Permanently deletes a file owned by the user without moving it to the trash. 
If the target is a folder, all descendants owned by the user are also deleted.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "emptyTrash": { + "id": "drive.files.emptyTrash", + "path": "files/trash", + "httpMethod": "DELETE", + "description": "Permanently deletes all of the user's trashed files.", + "scopes": [ + "https://www.googleapis.com/auth/drive" + ] + }, + "export": { + "id": "drive.files.export", + "path": "files/{fileId}/export", + "httpMethod": "GET", + "description": "Exports a Google Doc to the requested MIME type and returns the exported content.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the format requested for this export.", + "required": true, + "location": "query" + } + }, + "parameterOrder": [ + "fileId", + "mimeType" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true + }, + "generateIds": { + "id": "drive.files.generateIds", + "path": "files/generateIds", + "httpMethod": "GET", + "description": "Generates a set of file IDs which can be provided in create requests.", + "parameters": { + "count": { + "type": "integer", + "description": "The number of IDs to return.", + "default": "10", + "format": "int32", + "minimum": "1", + "maximum": "1000", + "location": "query" + }, + "space": { + "type": "string", + "description": "The space in which the IDs can be used to create new files. Supported values are 'drive' and 'appDataFolder'.", + "default": "drive", + "location": "query" + } + }, + "response": { + "$ref": "GeneratedIds" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "id": "drive.files.get", + "path": "files/{fileId}", + "httpMethod": "GET", + "description": "Gets a file's metadata or content by ID.", + "parameters": { + "acknowledgeAbuse": { + "type": "boolean", + "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. 
This is only applicable when alt=media.", + "default": "false", + "location": "query" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true, + "supportsSubscription": true + }, + "list": { + "id": "drive.files.list", + "path": "files", + "httpMethod": "GET", + "description": "Lists or searches files.", + "parameters": { + "corpus": { + "type": "string", + "description": "The source of files to list.", + "default": "user", + "enum": [ + "domain", + "user" + ], + "enumDescriptions": [ + "Files shared to the user's domain.", + "Files owned by or shared to the user." + ], + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one million files in which the requested sort order is ignored.", + "location": "query" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of files to return per page.", + "default": "100", + "format": "int32", + "minimum": "1", + "maximum": "1000", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", + "location": "query" + }, + "q": { + "type": "string", + "description": "A query for filtering the file results. See the \"Search for Files\" guide for supported syntax.", + "location": "query" + }, + "spaces": { + "type": "string", + "description": "A comma-separated list of spaces to query within the corpus. 
Supported values are 'drive', 'appDataFolder' and 'photos'.", + "default": "drive", + "location": "query" + } + }, + "response": { + "$ref": "FileList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "id": "drive.files.update", + "path": "files/{fileId}", + "httpMethod": "PATCH", + "description": "Updates a file's metadata and/or content with patch semantics.", + "parameters": { + "addParents": { + "type": "string", + "description": "A comma-separated list of parent IDs to add.", + "location": "query" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "keepRevisionForever": { + "type": "boolean", + "description": "Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive.", + "default": "false", + "location": "query" + }, + "ocrLanguage": { + "type": "string", + "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + "location": "query" + }, + "removeParents": { + "type": "string", + "description": "A comma-separated list of parent IDs to remove.", + "location": "query" + }, + "useContentAsIndexableText": { + "type": "boolean", + "description": "Whether to use the uploaded content as indexable text.", + "default": "false", + "location": "query" + } + }, + "parameterOrder": [ + "fileId" + ], + "request": { + "$ref": "File" + }, + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.scripts" + ], + "supportsMediaUpload": true, + "mediaUpload": { + "accept": [ + "*/*" + ], + "maxSize": "5120GB", + "protocols": { + "simple": { + "multipart": true, + "path": "/upload/drive/v3/files/{fileId}" + }, + "resumable": { + "multipart": true, + "path": "/resumable/upload/drive/v3/files/{fileId}" + } + } + } + }, + "watch": { + "id": "drive.files.watch", + "path": "files/{fileId}/watch", + "httpMethod": "POST", + "description": "Subscribes to changes to a file", + "parameters": { + "acknowledgeAbuse": { + "type": "boolean", + "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. 
This is only applicable when alt=media.", + "default": "false", + "location": "query" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true, + "supportsSubscription": true + } + } + }, + "permissions": { + "methods": { + "create": { + "id": "drive.permissions.create", + "path": "files/{fileId}/permissions", + "httpMethod": "POST", + "description": "Creates a permission for a file.", + "parameters": { + "emailMessage": { + "type": "string", + "description": "A custom message to include in the notification email.", + "location": "query" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "sendNotificationEmail": { + "type": "boolean", + "description": "Whether to send a notification email when sharing to users or groups. This defaults to true for users and groups, and is not allowed for other requests. It must not be disabled for ownership transfers.", + "location": "query" + }, + "transferOwnership": { + "type": "boolean", + "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. 
This parameter is required as an acknowledgement of the side effect.", + "default": "false", + "location": "query" + } + }, + "parameterOrder": [ + "fileId" + ], + "request": { + "$ref": "Permission" + }, + "response": { + "$ref": "Permission" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "delete": { + "id": "drive.permissions.delete", + "path": "files/{fileId}/permissions/{permissionId}", + "httpMethod": "DELETE", + "description": "Deletes a permission.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "permissionId": { + "type": "string", + "description": "The ID of the permission.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "permissionId" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "id": "drive.permissions.get", + "path": "files/{fileId}/permissions/{permissionId}", + "httpMethod": "GET", + "description": "Gets a permission by ID.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "permissionId": { + "type": "string", + "description": "The ID of the permission.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "permissionId" + ], + "response": { + "$ref": "Permission" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "id": "drive.permissions.list", + "path": "files/{fileId}/permissions", + "httpMethod": "GET", + "description": "Lists a file's permissions.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "response": { + "$ref": "PermissionList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "id": "drive.permissions.update", + "path": "files/{fileId}/permissions/{permissionId}", + "httpMethod": "PATCH", + "description": "Updates a permission with patch semantics.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "permissionId": { + "type": "string", + "description": "The ID of the permission.", + "required": true, + "location": "path" + }, + "transferOwnership": { + "type": "boolean", + "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. 
This parameter is required as an acknowledgement of the side effect.", + "default": "false", + "location": "query" + } + }, + "parameterOrder": [ + "fileId", + "permissionId" + ], + "request": { + "$ref": "Permission" + }, + "response": { + "$ref": "Permission" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + } + } + }, + "replies": { + "methods": { + "create": { + "id": "drive.replies.create", + "path": "files/{fileId}/comments/{commentId}/replies", + "httpMethod": "POST", + "description": "Creates a new reply to a comment.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId" + ], + "request": { + "$ref": "Reply" + }, + "response": { + "$ref": "Reply" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "delete": { + "id": "drive.replies.delete", + "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + "httpMethod": "DELETE", + "description": "Deletes a reply.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "replyId": { + "type": "string", + "description": "The ID of the reply.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId", + "replyId" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "id": "drive.replies.get", + "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + "httpMethod": "GET", + "description": "Gets a reply by ID.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "includeDeleted": { + "type": "boolean", + "description": "Whether to return deleted replies. Deleted replies will not include their original content.", + "default": "false", + "location": "query" + }, + "replyId": { + "type": "string", + "description": "The ID of the reply.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId", + "replyId" + ], + "response": { + "$ref": "Reply" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "id": "drive.replies.list", + "path": "files/{fileId}/comments/{commentId}/replies", + "httpMethod": "GET", + "description": "Lists a comment's replies.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "includeDeleted": { + "type": "boolean", + "description": "Whether to include deleted replies. 
Deleted replies will not include their original content.", + "default": "false", + "location": "query" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of replies to return per page.", + "default": "20", + "format": "int32", + "minimum": "1", + "maximum": "100", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", + "location": "query" + } + }, + "parameterOrder": [ + "fileId", + "commentId" + ], + "response": { + "$ref": "ReplyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "id": "drive.replies.update", + "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + "httpMethod": "PATCH", + "description": "Updates a reply with patch semantics.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "replyId": { + "type": "string", + "description": "The ID of the reply.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId", + "replyId" + ], + "request": { + "$ref": "Reply" + }, + "response": { + "$ref": "Reply" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + } + } + }, + "revisions": { + "methods": { + "delete": { + "id": "drive.revisions.delete", + "path": "files/{fileId}/revisions/{revisionId}", + "httpMethod": "DELETE", + "description": "Permanently deletes a revision. This method is only applicable to files with binary content in Drive.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "revisionId": { + "type": "string", + "description": "The ID of the revision.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "revisionId" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "id": "drive.revisions.get", + "path": "files/{fileId}/revisions/{revisionId}", + "httpMethod": "GET", + "description": "Gets a revision's metadata or content by ID.", + "parameters": { + "acknowledgeAbuse": { + "type": "boolean", + "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. 
This is only applicable when alt=media.", + "default": "false", + "location": "query" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "revisionId": { + "type": "string", + "description": "The ID of the revision.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "revisionId" + ], + "response": { + "$ref": "Revision" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "list": { + "id": "drive.revisions.list", + "path": "files/{fileId}/revisions", + "httpMethod": "GET", + "description": "Lists a file's revisions.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "response": { + "$ref": "RevisionList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "id": "drive.revisions.update", + "path": "files/{fileId}/revisions/{revisionId}", + "httpMethod": "PATCH", + "description": "Updates a revision with patch semantics.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "revisionId": { + "type": "string", + "description": "The ID of the revision.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "revisionId" + ], + "request": { + "$ref": "Revision" + }, + "response": { + "$ref": "Revision" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + } + } + } + } +} diff --git a/vendor/google.golang.org/api/drive/v3/drive-gen.go b/vendor/google.golang.org/api/drive/v3/drive-gen.go new file mode 100644 index 00000000..9e6e0874 --- /dev/null +++ b/vendor/google.golang.org/api/drive/v3/drive-gen.go @@ -0,0 +1,6434 @@ +// Package drive provides access to the Drive API. +// +// See https://developers.google.com/drive/ +// +// Usage example: +// +// import "google.golang.org/api/drive/v3" +// ... +// driveService, err := drive.New(oauthHttpClient) +package drive + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. 
+var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "drive:v3" +const apiName = "drive" +const apiVersion = "v3" +const basePath = "https://www.googleapis.com/drive/v3/" + +// OAuth2 scopes used by this API. +const ( + // View and manage the files in your Google Drive + DriveScope = "https://www.googleapis.com/auth/drive" + + // View and manage its own configuration data in your Google Drive + DriveAppdataScope = "https://www.googleapis.com/auth/drive.appdata" + + // View and manage Google Drive files and folders that you have opened + // or created with this app + DriveFileScope = "https://www.googleapis.com/auth/drive.file" + + // View and manage metadata of files in your Google Drive + DriveMetadataScope = "https://www.googleapis.com/auth/drive.metadata" + + // View metadata for files in your Google Drive + DriveMetadataReadonlyScope = "https://www.googleapis.com/auth/drive.metadata.readonly" + + // View the photos, videos and albums in your Google Photos + DrivePhotosReadonlyScope = "https://www.googleapis.com/auth/drive.photos.readonly" + + // View the files in your Google Drive + DriveReadonlyScope = "https://www.googleapis.com/auth/drive.readonly" + + // Modify your Google Apps Script scripts' behavior + DriveScriptsScope = "https://www.googleapis.com/auth/drive.scripts" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.About = NewAboutService(s) + s.Changes = NewChangesService(s) + s.Channels = NewChannelsService(s) + s.Comments = NewCommentsService(s) + s.Files = NewFilesService(s) + s.Permissions = NewPermissionsService(s) + s.Replies = NewRepliesService(s) + s.Revisions = NewRevisionsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + About *AboutService + + Changes *ChangesService + + Channels *ChannelsService + + Comments *CommentsService + + Files *FilesService + + Permissions *PermissionsService + + Replies *RepliesService + + Revisions *RevisionsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewAboutService(s *Service) *AboutService { + rs := &AboutService{s: s} + return rs +} + +type AboutService struct { + s *Service +} + +func NewChangesService(s *Service) *ChangesService { + rs := &ChangesService{s: s} + return rs +} + +type ChangesService struct { + s *Service +} + +func NewChannelsService(s *Service) *ChannelsService { + rs := &ChannelsService{s: s} + return rs +} + +type ChannelsService struct { + s *Service +} + +func NewCommentsService(s *Service) *CommentsService { + rs := &CommentsService{s: s} + return rs +} + +type CommentsService struct { + s *Service +} + +func NewFilesService(s *Service) *FilesService { + rs := &FilesService{s: s} + return rs +} + +type FilesService struct { + s *Service +} + +func NewPermissionsService(s *Service) *PermissionsService { + rs := &PermissionsService{s: s} + return rs +} + +type PermissionsService struct { + s *Service +} + +func NewRepliesService(s *Service) *RepliesService { + rs := &RepliesService{s: s} + 
return rs +} + +type RepliesService struct { + s *Service +} + +func NewRevisionsService(s *Service) *RevisionsService { + rs := &RevisionsService{s: s} + return rs +} + +type RevisionsService struct { + s *Service +} + +// About: Information about the user, the user's Drive, and system +// capabilities. +type About struct { + // AppInstalled: Whether the user has installed the requesting app. + AppInstalled bool `json:"appInstalled,omitempty"` + + // ExportFormats: A map of source MIME type to possible targets for all + // supported exports. + ExportFormats map[string][]string `json:"exportFormats,omitempty"` + + // FolderColorPalette: The currently supported folder colors as RGB hex + // strings. + FolderColorPalette []string `json:"folderColorPalette,omitempty"` + + // ImportFormats: A map of source MIME type to possible targets for all + // supported imports. + ImportFormats map[string][]string `json:"importFormats,omitempty"` + + // Kind: This is always drive#about. + Kind string `json:"kind,omitempty"` + + // MaxImportSizes: A map of maximum import sizes by MIME type, in bytes. + MaxImportSizes map[string]string `json:"maxImportSizes,omitempty"` + + // MaxUploadSize: The maximum upload size in bytes. + MaxUploadSize int64 `json:"maxUploadSize,omitempty,string"` + + // StorageQuota: The user's storage quota limits and usage. All fields + // are measured in bytes. + StorageQuota *AboutStorageQuota `json:"storageQuota,omitempty"` + + // User: The authenticated user. + User *User `json:"user,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AppInstalled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *About) MarshalJSON() ([]byte, error) { + type noMethod About + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AboutStorageQuota: The user's storage quota limits and usage. All +// fields are measured in bytes. +type AboutStorageQuota struct { + // Limit: The usage limit, if applicable. This will not be present if + // the user has unlimited storage. + Limit int64 `json:"limit,omitempty,string"` + + // Usage: The total usage across all services. + Usage int64 `json:"usage,omitempty,string"` + + // UsageInDrive: The usage by all files in Google Drive. + UsageInDrive int64 `json:"usageInDrive,omitempty,string"` + + // UsageInDriveTrash: The usage by trashed files in Google Drive. + UsageInDriveTrash int64 `json:"usageInDriveTrash,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "Limit") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *AboutStorageQuota) MarshalJSON() ([]byte, error) { + type noMethod AboutStorageQuota + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Change: A change to a file. +type Change struct { + // File: The updated state of the file. Present if the file has not been + // removed. + File *File `json:"file,omitempty"` + + // FileId: The ID of the file which has changed. + FileId string `json:"fileId,omitempty"` + + // Kind: This is always drive#change. + Kind string `json:"kind,omitempty"` + + // Removed: Whether the file has been removed from the view of the + // changes list, for example by deletion or lost access. + Removed bool `json:"removed,omitempty"` + + // Time: The time of this change (RFC 3339 date-time). + Time string `json:"time,omitempty"` + + // ForceSendFields is a list of field names (e.g. "File") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Change) MarshalJSON() ([]byte, error) { + type noMethod Change + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ChangeList: A list of changes for a user. +type ChangeList struct { + // Changes: The page of changes. + Changes []*Change `json:"changes,omitempty"` + + // Kind: This is always drive#changeList. + Kind string `json:"kind,omitempty"` + + // NewStartPageToken: The starting page token for future changes. This + // will be present only if the end of the current changes list has been + // reached. + NewStartPageToken string `json:"newStartPageToken,omitempty"` + + // NextPageToken: The page token for the next page of changes. This will + // be absent if the end of the current changes list has been reached. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Changes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ChangeList) MarshalJSON() ([]byte, error) { + type noMethod ChangeList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Channel: An notification channel used to watch for resource changes. +type Channel struct { + // Address: The address where notifications are delivered for this + // channel. + Address string `json:"address,omitempty"` + + // Expiration: Date and time of notification channel expiration, + // expressed as a Unix timestamp, in milliseconds. Optional. + Expiration int64 `json:"expiration,omitempty,string"` + + // Id: A UUID or similar unique string that identifies this channel. + Id string `json:"id,omitempty"` + + // Kind: Identifies this as a notification channel used to watch for + // changes to a resource. Value: the fixed string "api#channel". 
+ Kind string `json:"kind,omitempty"` + + // Params: Additional parameters controlling delivery channel behavior. + // Optional. + Params map[string]string `json:"params,omitempty"` + + // Payload: A Boolean value to indicate whether payload is wanted. + // Optional. + Payload bool `json:"payload,omitempty"` + + // ResourceId: An opaque ID that identifies the resource being watched + // on this channel. Stable across different API versions. + ResourceId string `json:"resourceId,omitempty"` + + // ResourceUri: A version-specific identifier for the watched resource. + ResourceUri string `json:"resourceUri,omitempty"` + + // Token: An arbitrary string delivered to the target address with each + // notification delivered over this channel. Optional. + Token string `json:"token,omitempty"` + + // Type: The type of delivery mechanism used for this channel. + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Channel) MarshalJSON() ([]byte, error) { + type noMethod Channel + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Comment: A comment on a file. +type Comment struct { + // Anchor: A region of the document represented as a JSON string. See + // anchor documentation for details on how to define and interpret + // anchor properties. + Anchor string `json:"anchor,omitempty"` + + // Author: The user who created the comment. + Author *User `json:"author,omitempty"` + + // Content: The plain text content of the comment. This field is used + // for setting the content, while htmlContent should be displayed. + Content string `json:"content,omitempty"` + + // CreatedTime: The time at which the comment was created (RFC 3339 + // date-time). + CreatedTime string `json:"createdTime,omitempty"` + + // Deleted: Whether the comment has been deleted. A deleted comment has + // no content. + Deleted bool `json:"deleted,omitempty"` + + // HtmlContent: The content of the comment with HTML formatting. + HtmlContent string `json:"htmlContent,omitempty"` + + // Id: The ID of the comment. + Id string `json:"id,omitempty"` + + // Kind: This is always drive#comment. + Kind string `json:"kind,omitempty"` + + // ModifiedTime: The last time the comment or any of its replies was + // modified (RFC 3339 date-time). + ModifiedTime string `json:"modifiedTime,omitempty"` + + // QuotedFileContent: The file content to which the comment refers, + // typically within the anchor region. For a text file, for example, + // this would be the text at the location of the comment. + QuotedFileContent *CommentQuotedFileContent `json:"quotedFileContent,omitempty"` + + // Replies: The full list of replies to the comment in chronological + // order. + Replies []*Reply `json:"replies,omitempty"` + + // Resolved: Whether the comment has been resolved by one of its + // replies. + Resolved bool `json:"resolved,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Anchor") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Comment) MarshalJSON() ([]byte, error) { + type noMethod Comment + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// CommentQuotedFileContent: The file content to which the comment +// refers, typically within the anchor region. For a text file, for +// example, this would be the text at the location of the comment. +type CommentQuotedFileContent struct { + // MimeType: The MIME type of the quoted content. + MimeType string `json:"mimeType,omitempty"` + + // Value: The quoted content itself. This is interpreted as plain text + // if set through the API. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MimeType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *CommentQuotedFileContent) MarshalJSON() ([]byte, error) { + type noMethod CommentQuotedFileContent + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// CommentList: A list of comments on a file. +type CommentList struct { + // Comments: The page of comments. + Comments []*Comment `json:"comments,omitempty"` + + // Kind: This is always drive#commentList. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The page token for the next page of comments. This + // will be absent if the end of the comments list has been reached. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Comments") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *CommentList) MarshalJSON() ([]byte, error) { + type noMethod CommentList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// File: The metadata for a file. +type File struct { + // AppProperties: A collection of arbitrary key-value pairs which are + // private to the requesting app. + // Entries with null values are cleared in update and copy requests. + AppProperties map[string]string `json:"appProperties,omitempty"` + + // Capabilities: Capabilities the current user has on the file. + Capabilities *FileCapabilities `json:"capabilities,omitempty"` + + // ContentHints: Additional information about the content of the file. + // These fields are never populated in responses. 
+ ContentHints *FileContentHints `json:"contentHints,omitempty"` + + // CreatedTime: The time at which the file was created (RFC 3339 + // date-time). + CreatedTime string `json:"createdTime,omitempty"` + + // Description: A short description of the file. + Description string `json:"description,omitempty"` + + // ExplicitlyTrashed: Whether the file has been explicitly trashed, as + // opposed to recursively trashed from a parent folder. + ExplicitlyTrashed bool `json:"explicitlyTrashed,omitempty"` + + // FileExtension: The final component of fullFileExtension. This is only + // available for files with binary content in Drive. + FileExtension string `json:"fileExtension,omitempty"` + + // FolderColorRgb: The color for a folder as an RGB hex string. The + // supported colors are published in the folderColorPalette field of the + // About resource. + // If an unsupported color is specified, the closest color in the + // palette will be used instead. + FolderColorRgb string `json:"folderColorRgb,omitempty"` + + // FullFileExtension: The full file extension extracted from the name + // field. May contain multiple concatenated extensions, such as + // "tar.gz". This is only available for files with binary content in + // Drive. + // This is automatically updated when the name field changes, however it + // is not cleared if the new name does not contain a valid extension. + FullFileExtension string `json:"fullFileExtension,omitempty"` + + // HeadRevisionId: The ID of the file's head revision. This is currently + // only available for files with binary content in Drive. + HeadRevisionId string `json:"headRevisionId,omitempty"` + + // IconLink: A static, unauthenticated link to the file's icon. + IconLink string `json:"iconLink,omitempty"` + + // Id: The ID of the file. + Id string `json:"id,omitempty"` + + // ImageMediaMetadata: Additional metadata about image media, if + // available. + ImageMediaMetadata *FileImageMediaMetadata `json:"imageMediaMetadata,omitempty"` + + // Kind: This is always drive#file. + Kind string `json:"kind,omitempty"` + + // LastModifyingUser: The last user to modify the file. + LastModifyingUser *User `json:"lastModifyingUser,omitempty"` + + // Md5Checksum: The MD5 checksum for the content of the file. This is + // only applicable to files with binary content in Drive. + Md5Checksum string `json:"md5Checksum,omitempty"` + + // MimeType: The MIME type of the file. + // Drive will attempt to automatically detect an appropriate value from + // uploaded content if no value is provided. The value cannot be changed + // unless a new revision is uploaded. + // If a file is created with a Google Doc MIME type, the uploaded + // content will be imported if possible. The supported import formats + // are published in the About resource. + MimeType string `json:"mimeType,omitempty"` + + // ModifiedByMeTime: The last time the file was modified by the user + // (RFC 3339 date-time). + ModifiedByMeTime string `json:"modifiedByMeTime,omitempty"` + + // ModifiedTime: The last time the file was modified by anyone (RFC 3339 + // date-time). + // Note that setting modifiedTime will also update modifiedByMeTime for + // the user. + ModifiedTime string `json:"modifiedTime,omitempty"` + + // Name: The name of the file. This is not necessarily unique within a + // folder. + Name string `json:"name,omitempty"` + + // OriginalFilename: The original filename of the uploaded content if + // available, or else the original value of the name field. 
This is only + // available for files with binary content in Drive. + OriginalFilename string `json:"originalFilename,omitempty"` + + // OwnedByMe: Whether the user owns the file. + OwnedByMe bool `json:"ownedByMe,omitempty"` + + // Owners: The owners of the file. Currently, only certain legacy files + // may have more than one owner. + Owners []*User `json:"owners,omitempty"` + + // Parents: The IDs of the parent folders which contain the file. + // If not specified as part of a create request, the file will be placed + // directly in the My Drive folder. Update requests must use the + // addParents and removeParents parameters to modify the values. + Parents []string `json:"parents,omitempty"` + + // Permissions: The full list of permissions for the file. This is only + // available if the requesting user can share the file. + Permissions []*Permission `json:"permissions,omitempty"` + + // Properties: A collection of arbitrary key-value pairs which are + // visible to all apps. + // Entries with null values are cleared in update and copy requests. + Properties map[string]string `json:"properties,omitempty"` + + // QuotaBytesUsed: The number of storage quota bytes used by the file. + // This includes the head revision as well as previous revisions with + // keepForever enabled. + QuotaBytesUsed int64 `json:"quotaBytesUsed,omitempty,string"` + + // Shared: Whether the file has been shared. + Shared bool `json:"shared,omitempty"` + + // SharedWithMeTime: The time at which the file was shared with the + // user, if applicable (RFC 3339 date-time). + SharedWithMeTime string `json:"sharedWithMeTime,omitempty"` + + // SharingUser: The user who shared the file with the requesting user, + // if applicable. + SharingUser *User `json:"sharingUser,omitempty"` + + // Size: The size of the file's content in bytes. This is only + // applicable to files with binary content in Drive. + Size int64 `json:"size,omitempty,string"` + + // Spaces: The list of spaces which contain the file. The currently + // supported values are 'drive', 'appDataFolder' and 'photos'. + Spaces []string `json:"spaces,omitempty"` + + // Starred: Whether the user has starred the file. + Starred bool `json:"starred,omitempty"` + + // ThumbnailLink: A short-lived link to the file's thumbnail, if + // available. Typically lasts on the order of hours. + ThumbnailLink string `json:"thumbnailLink,omitempty"` + + // Trashed: Whether the file has been trashed, either explicitly or from + // a trashed parent folder. Only the owner may trash a file, and other + // users cannot see files in the owner's trash. + Trashed bool `json:"trashed,omitempty"` + + // Version: A monotonically increasing version number for the file. This + // reflects every change made to the file on the server, even those not + // visible to the user. + Version int64 `json:"version,omitempty,string"` + + // VideoMediaMetadata: Additional metadata about video media. This may + // not be available immediately upon upload. + VideoMediaMetadata *FileVideoMediaMetadata `json:"videoMediaMetadata,omitempty"` + + // ViewedByMe: Whether the file has been viewed by this user. + ViewedByMe bool `json:"viewedByMe,omitempty"` + + // ViewedByMeTime: The last time the file was viewed by the user (RFC + // 3339 date-time). + ViewedByMeTime string `json:"viewedByMeTime,omitempty"` + + // ViewersCanCopyContent: Whether users with only reader or commenter + // permission can copy the file's content. This affects copy, download, + // and print operations. 
+ ViewersCanCopyContent bool `json:"viewersCanCopyContent,omitempty"` + + // WebContentLink: A link for downloading the content of the file in a + // browser. This is only available for files with binary content in + // Drive. + WebContentLink string `json:"webContentLink,omitempty"` + + // WebViewLink: A link for opening the file in a relevant Google editor + // or viewer in a browser. + WebViewLink string `json:"webViewLink,omitempty"` + + // WritersCanShare: Whether users with only writer permission can modify + // the file's permissions. + WritersCanShare bool `json:"writersCanShare,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AppProperties") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *File) MarshalJSON() ([]byte, error) { + type noMethod File + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileCapabilities: Capabilities the current user has on the file. +type FileCapabilities struct { + // CanComment: Whether the user can comment on the file. + CanComment bool `json:"canComment,omitempty"` + + // CanCopy: Whether the user can copy the file. + CanCopy bool `json:"canCopy,omitempty"` + + // CanEdit: Whether the user can edit the file's content. + CanEdit bool `json:"canEdit,omitempty"` + + // CanReadRevisions: Whether the current user has read access to the + // Revisions resource of the file. + CanReadRevisions bool `json:"canReadRevisions,omitempty"` + + // CanShare: Whether the user can modify the file's permissions and + // sharing settings. + CanShare bool `json:"canShare,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CanComment") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileCapabilities) MarshalJSON() ([]byte, error) { + type noMethod FileCapabilities + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileContentHints: Additional information about the content of the +// file. These fields are never populated in responses. +type FileContentHints struct { + // IndexableText: Text to be indexed for the file to improve fullText + // queries. This is limited to 128KB in length and may contain HTML + // elements. + IndexableText string `json:"indexableText,omitempty"` + + // Thumbnail: A thumbnail for the file. This will only be used if Drive + // cannot generate a standard thumbnail. + Thumbnail *FileContentHintsThumbnail `json:"thumbnail,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IndexableText") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileContentHints) MarshalJSON() ([]byte, error) { + type noMethod FileContentHints + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileContentHintsThumbnail: A thumbnail for the file. This will only +// be used if Drive cannot generate a standard thumbnail. +type FileContentHintsThumbnail struct { + // Image: The thumbnail data encoded with URL-safe Base64 (RFC 4648 + // section 5). + Image string `json:"image,omitempty"` + + // MimeType: The MIME type of the thumbnail. + MimeType string `json:"mimeType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Image") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileContentHintsThumbnail) MarshalJSON() ([]byte, error) { + type noMethod FileContentHintsThumbnail + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileImageMediaMetadata: Additional metadata about image media, if +// available. +type FileImageMediaMetadata struct { + // Aperture: The aperture used to create the photo (f-number). + Aperture float64 `json:"aperture,omitempty"` + + // CameraMake: The make of the camera used to create the photo. + CameraMake string `json:"cameraMake,omitempty"` + + // CameraModel: The model of the camera used to create the photo. + CameraModel string `json:"cameraModel,omitempty"` + + // ColorSpace: The color space of the photo. + ColorSpace string `json:"colorSpace,omitempty"` + + // ExposureBias: The exposure bias of the photo (APEX value). + ExposureBias float64 `json:"exposureBias,omitempty"` + + // ExposureMode: The exposure mode used to create the photo. + ExposureMode string `json:"exposureMode,omitempty"` + + // ExposureTime: The length of the exposure, in seconds. + ExposureTime float64 `json:"exposureTime,omitempty"` + + // FlashUsed: Whether a flash was used to create the photo. + FlashUsed bool `json:"flashUsed,omitempty"` + + // FocalLength: The focal length used to create the photo, in + // millimeters. + FocalLength float64 `json:"focalLength,omitempty"` + + // Height: The height of the image in pixels. + Height int64 `json:"height,omitempty"` + + // IsoSpeed: The ISO speed used to create the photo. + IsoSpeed int64 `json:"isoSpeed,omitempty"` + + // Lens: The lens used to create the photo. + Lens string `json:"lens,omitempty"` + + // Location: Geographic location information stored in the image. + Location *FileImageMediaMetadataLocation `json:"location,omitempty"` + + // MaxApertureValue: The smallest f-number of the lens at the focal + // length used to create the photo (APEX value). + MaxApertureValue float64 `json:"maxApertureValue,omitempty"` + + // MeteringMode: The metering mode used to create the photo. + MeteringMode string `json:"meteringMode,omitempty"` + + // Rotation: The rotation in clockwise degrees from the image's original + // orientation. 
+ Rotation int64 `json:"rotation,omitempty"` + + // Sensor: The type of sensor used to create the photo. + Sensor string `json:"sensor,omitempty"` + + // SubjectDistance: The distance to the subject of the photo, in meters. + SubjectDistance int64 `json:"subjectDistance,omitempty"` + + // Time: The date and time the photo was taken (EXIF DateTime). + Time string `json:"time,omitempty"` + + // WhiteBalance: The white balance mode used to create the photo. + WhiteBalance string `json:"whiteBalance,omitempty"` + + // Width: The width of the image in pixels. + Width int64 `json:"width,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Aperture") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileImageMediaMetadata) MarshalJSON() ([]byte, error) { + type noMethod FileImageMediaMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileImageMediaMetadataLocation: Geographic location information +// stored in the image. +type FileImageMediaMetadataLocation struct { + // Altitude: The altitude stored in the image. + Altitude float64 `json:"altitude,omitempty"` + + // Latitude: The latitude stored in the image. + Latitude float64 `json:"latitude,omitempty"` + + // Longitude: The longitude stored in the image. + Longitude float64 `json:"longitude,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Altitude") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileImageMediaMetadataLocation) MarshalJSON() ([]byte, error) { + type noMethod FileImageMediaMetadataLocation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileVideoMediaMetadata: Additional metadata about video media. This +// may not be available immediately upon upload. +type FileVideoMediaMetadata struct { + // DurationMillis: The duration of the video in milliseconds. + DurationMillis int64 `json:"durationMillis,omitempty,string"` + + // Height: The height of the video in pixels. + Height int64 `json:"height,omitempty"` + + // Width: The width of the video in pixels. + Width int64 `json:"width,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DurationMillis") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileVideoMediaMetadata) MarshalJSON() ([]byte, error) { + type noMethod FileVideoMediaMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileList: A list of files. +type FileList struct { + // Files: The page of files. 
+ Files []*File `json:"files,omitempty"` + + // Kind: This is always drive#fileList. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The page token for the next page of files. This will + // be absent if the end of the files list has been reached. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Files") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileList) MarshalJSON() ([]byte, error) { + type noMethod FileList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// GeneratedIds: A list of generated file IDs which can be provided in +// create requests. +type GeneratedIds struct { + // Ids: The IDs generated for the requesting user in the specified + // space. + Ids []string `json:"ids,omitempty"` + + // Kind: This is always drive#generatedIds + Kind string `json:"kind,omitempty"` + + // Space: The type of file that can be created with these IDs. + Space string `json:"space,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Ids") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *GeneratedIds) MarshalJSON() ([]byte, error) { + type noMethod GeneratedIds + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Permission: A permission for a file. A permission grants a user, +// group, domain or the world access to a file or a folder hierarchy. +type Permission struct { + // AllowFileDiscovery: Whether the permission allows the file to be + // discovered through search. This is only applicable for permissions of + // type domain or anyone. + AllowFileDiscovery bool `json:"allowFileDiscovery,omitempty"` + + // DisplayName: A displayable name for users, groups or domains. + DisplayName string `json:"displayName,omitempty"` + + // Domain: The domain to which this permission refers. + Domain string `json:"domain,omitempty"` + + // EmailAddress: The email address of the user or group to which this + // permission refers. + EmailAddress string `json:"emailAddress,omitempty"` + + // Id: The ID of this permission. This is a unique identifier for the + // grantee, and is published in User resources as permissionId. + Id string `json:"id,omitempty"` + + // Kind: This is always drive#permission. + Kind string `json:"kind,omitempty"` + + // PhotoLink: A link to the user's profile photo, if available. + PhotoLink string `json:"photoLink,omitempty"` + + // Role: The role granted by this permission. 
Valid values are: + // - owner + // - writer + // - commenter + // - reader + Role string `json:"role,omitempty"` + + // Type: The type of the grantee. Valid values are: + // - user + // - group + // - domain + // - anyone + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AllowFileDiscovery") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Permission) MarshalJSON() ([]byte, error) { + type noMethod Permission + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PermissionList: A list of permissions for a file. +type PermissionList struct { + // Kind: This is always drive#permissionList. + Kind string `json:"kind,omitempty"` + + // Permissions: The full list of permissions. + Permissions []*Permission `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PermissionList) MarshalJSON() ([]byte, error) { + type noMethod PermissionList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Reply: A reply to a comment on a file. +type Reply struct { + // Action: The action the reply performed to the parent comment. Valid + // values are: + // - resolve + // - reopen + Action string `json:"action,omitempty"` + + // Author: The user who created the reply. + Author *User `json:"author,omitempty"` + + // Content: The plain text content of the reply. This field is used for + // setting the content, while htmlContent should be displayed. This is + // required on creates if no action is specified. + Content string `json:"content,omitempty"` + + // CreatedTime: The time at which the reply was created (RFC 3339 + // date-time). + CreatedTime string `json:"createdTime,omitempty"` + + // Deleted: Whether the reply has been deleted. A deleted reply has no + // content. + Deleted bool `json:"deleted,omitempty"` + + // HtmlContent: The content of the reply with HTML formatting. + HtmlContent string `json:"htmlContent,omitempty"` + + // Id: The ID of the reply. + Id string `json:"id,omitempty"` + + // Kind: This is always drive#reply. + Kind string `json:"kind,omitempty"` + + // ModifiedTime: The last time the reply was modified (RFC 3339 + // date-time). + ModifiedTime string `json:"modifiedTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Reply) MarshalJSON() ([]byte, error) { + type noMethod Reply + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ReplyList: A list of replies to a comment on a file. +type ReplyList struct { + // Kind: This is always drive#replyList. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The page token for the next page of replies. This will + // be absent if the end of the replies list has been reached. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Replies: The page of replies. + Replies []*Reply `json:"replies,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ReplyList) MarshalJSON() ([]byte, error) { + type noMethod ReplyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Revision: The metadata for a revision to a file. +type Revision struct { + // Id: The ID of the revision. + Id string `json:"id,omitempty"` + + // KeepForever: Whether to keep this revision forever, even if it is no + // longer the head revision. If not set, the revision will be + // automatically purged 30 days after newer content is uploaded. This + // can be set on a maximum of 200 revisions for a file. + // This field is only applicable to files with binary content in Drive. + KeepForever bool `json:"keepForever,omitempty"` + + // Kind: This is always drive#revision. + Kind string `json:"kind,omitempty"` + + // LastModifyingUser: The last user to modify this revision. + LastModifyingUser *User `json:"lastModifyingUser,omitempty"` + + // Md5Checksum: The MD5 checksum of the revision's content. This is only + // applicable to files with binary content in Drive. + Md5Checksum string `json:"md5Checksum,omitempty"` + + // MimeType: The MIME type of the revision. + MimeType string `json:"mimeType,omitempty"` + + // ModifiedTime: The last time the revision was modified (RFC 3339 + // date-time). + ModifiedTime string `json:"modifiedTime,omitempty"` + + // OriginalFilename: The original filename used to create this revision. + // This is only applicable to files with binary content in Drive. + OriginalFilename string `json:"originalFilename,omitempty"` + + // PublishAuto: Whether subsequent revisions will be automatically + // republished. This is only applicable to Google Docs. + PublishAuto bool `json:"publishAuto,omitempty"` + + // Published: Whether this revision is published. This is only + // applicable to Google Docs. + Published bool `json:"published,omitempty"` + + // PublishedOutsideDomain: Whether this revision is published outside + // the domain. This is only applicable to Google Docs. 
+ PublishedOutsideDomain bool `json:"publishedOutsideDomain,omitempty"` + + // Size: The size of the revision's content in bytes. This is only + // applicable to files with binary content in Drive. + Size int64 `json:"size,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Revision) MarshalJSON() ([]byte, error) { + type noMethod Revision + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RevisionList: A list of revisions of a file. +type RevisionList struct { + // Kind: This is always drive#revisionList. + Kind string `json:"kind,omitempty"` + + // Revisions: The full list of revisions. + Revisions []*Revision `json:"revisions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RevisionList) MarshalJSON() ([]byte, error) { + type noMethod RevisionList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type StartPageToken struct { + // Kind: This is always drive#startPageToken. + Kind string `json:"kind,omitempty"` + + // StartPageToken: The starting page token for listing changes. + StartPageToken string `json:"startPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *StartPageToken) MarshalJSON() ([]byte, error) { + type noMethod StartPageToken + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// User: Information about a Drive user. +type User struct { + // DisplayName: A plain text displayable name for this user. + DisplayName string `json:"displayName,omitempty"` + + // EmailAddress: The email address of the user. This may not be present + // in certain contexts if the user has not made their email address + // visible to the requester. + EmailAddress string `json:"emailAddress,omitempty"` + + // Kind: This is always drive#user. + Kind string `json:"kind,omitempty"` + + // Me: Whether this user is the requesting user. 
+ Me bool `json:"me,omitempty"` + + // PermissionId: The user's ID as visible in Permission resources. + PermissionId string `json:"permissionId,omitempty"` + + // PhotoLink: A link to the user's profile photo, if available. + PhotoLink string `json:"photoLink,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *User) MarshalJSON() ([]byte, error) { + type noMethod User + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "drive.about.get": + +type AboutGetCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets information about the user, the user's Drive, and system +// capabilities. +func (r *AboutService) Get() *AboutGetCall { + c := &AboutGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AboutGetCall) Fields(s ...googleapi.Field) *AboutGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AboutGetCall) IfNoneMatch(entityTag string) *AboutGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AboutGetCall) Context(ctx context.Context) *AboutGetCall { + c.ctx_ = ctx + return c +} + +func (c *AboutGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "about") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.about.get" call. +// Exactly one of *About or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *About.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AboutGetCall) Do(opts ...googleapi.CallOption) (*About, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &About{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets information about the user, the user's Drive, and system capabilities.", + // "httpMethod": "GET", + // "id": "drive.about.get", + // "path": "about", + // "response": { + // "$ref": "About" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.changes.getStartPageToken": + +type ChangesGetStartPageTokenCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// GetStartPageToken: Gets the starting pageToken for listing future +// changes. +func (r *ChangesService) GetStartPageToken() *ChangesGetStartPageTokenCall { + c := &ChangesGetStartPageTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesGetStartPageTokenCall) Fields(s ...googleapi.Field) *ChangesGetStartPageTokenCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ChangesGetStartPageTokenCall) IfNoneMatch(entityTag string) *ChangesGetStartPageTokenCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChangesGetStartPageTokenCall) Context(ctx context.Context) *ChangesGetStartPageTokenCall { + c.ctx_ = ctx + return c +} + +func (c *ChangesGetStartPageTokenCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "changes/startPageToken") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.changes.getStartPageToken" call. +// Exactly one of *StartPageToken or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *StartPageToken.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ChangesGetStartPageTokenCall) Do(opts ...googleapi.CallOption) (*StartPageToken, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &StartPageToken{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the starting pageToken for listing future changes.", + // "httpMethod": "GET", + // "id": "drive.changes.getStartPageToken", + // "path": "changes/startPageToken", + // "response": { + // "$ref": "StartPageToken" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.changes.list": + +type ChangesListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists changes for a user. +func (r *ChangesService) List(pageToken string) *ChangesListCall { + c := &ChangesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// IncludeRemoved sets the optional parameter "includeRemoved": Whether +// to include changes indicating that items have left the view of the +// changes list, for example by deletion or lost access. +func (c *ChangesListCall) IncludeRemoved(includeRemoved bool) *ChangesListCall { + c.urlParams_.Set("includeRemoved", fmt.Sprint(includeRemoved)) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of changes to return per page. +func (c *ChangesListCall) PageSize(pageSize int64) *ChangesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// RestrictToMyDrive sets the optional parameter "restrictToMyDrive": +// Whether to restrict the results to changes inside the My Drive +// hierarchy. This omits changes to files such as those in the +// Application Data folder or shared files which have not been added to +// My Drive. +func (c *ChangesListCall) RestrictToMyDrive(restrictToMyDrive bool) *ChangesListCall { + c.urlParams_.Set("restrictToMyDrive", fmt.Sprint(restrictToMyDrive)) + return c +} + +// Spaces sets the optional parameter "spaces": A comma-separated list +// of spaces to query within the user corpus. Supported values are +// 'drive', 'appDataFolder' and 'photos'. +func (c *ChangesListCall) Spaces(spaces string) *ChangesListCall { + c.urlParams_.Set("spaces", spaces) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesListCall) Fields(s ...googleapi.Field) *ChangesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ChangesListCall) IfNoneMatch(entityTag string) *ChangesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChangesListCall) Context(ctx context.Context) *ChangesListCall { + c.ctx_ = ctx + return c +} + +func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "changes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.changes.list" call. +// Exactly one of *ChangeList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ChangeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ChangeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists changes for a user.", + // "httpMethod": "GET", + // "id": "drive.changes.list", + // "parameterOrder": [ + // "pageToken" + // ], + // "parameters": { + // "includeRemoved": { + // "default": "true", + // "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", + // "location": "query", + // "type": "boolean" + // }, + // "pageSize": { + // "default": "100", + // "description": "The maximum number of changes to return per page.", + // "format": "int32", + // "location": "query", + // "maximum": "1000", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The token for continuing a previous list request on the next page. 
This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "restrictToMyDrive": { + // "default": "false", + // "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", + // "location": "query", + // "type": "boolean" + // }, + // "spaces": { + // "default": "drive", + // "description": "A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "changes", + // "response": { + // "$ref": "ChangeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ], + // "supportsSubscription": true + // } + +} + +// method id "drive.changes.watch": + +type ChangesWatchCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Watch: Subscribes to changes for a user. +func (r *ChangesService) Watch(pageToken string, channel *Channel) *ChangesWatchCall { + c := &ChangesWatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("pageToken", pageToken) + c.channel = channel + return c +} + +// IncludeRemoved sets the optional parameter "includeRemoved": Whether +// to include changes indicating that items have left the view of the +// changes list, for example by deletion or lost access. +func (c *ChangesWatchCall) IncludeRemoved(includeRemoved bool) *ChangesWatchCall { + c.urlParams_.Set("includeRemoved", fmt.Sprint(includeRemoved)) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of changes to return per page. +func (c *ChangesWatchCall) PageSize(pageSize int64) *ChangesWatchCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// RestrictToMyDrive sets the optional parameter "restrictToMyDrive": +// Whether to restrict the results to changes inside the My Drive +// hierarchy. This omits changes to files such as those in the +// Application Data folder or shared files which have not been added to +// My Drive. +func (c *ChangesWatchCall) RestrictToMyDrive(restrictToMyDrive bool) *ChangesWatchCall { + c.urlParams_.Set("restrictToMyDrive", fmt.Sprint(restrictToMyDrive)) + return c +} + +// Spaces sets the optional parameter "spaces": A comma-separated list +// of spaces to query within the user corpus. Supported values are +// 'drive', 'appDataFolder' and 'photos'. +func (c *ChangesWatchCall) Spaces(spaces string) *ChangesWatchCall { + c.urlParams_.Set("spaces", spaces) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesWatchCall) Fields(s ...googleapi.Field) *ChangesWatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChangesWatchCall) Context(ctx context.Context) *ChangesWatchCall { + c.ctx_ = ctx + return c +} + +func (c *ChangesWatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "changes/watch") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.changes.watch" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ChangesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Subscribes to changes for a user.", + // "httpMethod": "POST", + // "id": "drive.changes.watch", + // "parameterOrder": [ + // "pageToken" + // ], + // "parameters": { + // "includeRemoved": { + // "default": "true", + // "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", + // "location": "query", + // "type": "boolean" + // }, + // "pageSize": { + // "default": "100", + // "description": "The maximum number of changes to return per page.", + // "format": "int32", + // "location": "query", + // "maximum": "1000", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "restrictToMyDrive": { + // "default": "false", + // "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", + // "location": "query", + // "type": "boolean" + // }, + // "spaces": { + // "default": "drive", + // "description": "A comma-separated list of spaces to query within the user corpus. 
Supported values are 'drive', 'appDataFolder' and 'photos'.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "changes/watch", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "response": { + // "$ref": "Channel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ], + // "supportsSubscription": true + // } + +} + +// method id "drive.channels.stop": + +type ChannelsStopCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Stop: Stop watching resources through this channel +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.channel = channel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { + c.ctx_ = ctx + return c +} + +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.channels.stop" call. +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Stop watching resources through this channel", + // "httpMethod": "POST", + // "id": "drive.channels.stop", + // "path": "channels/stop", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.comments.create": + +type CommentsCreateCall struct { + s *Service + fileId string + comment *Comment + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Creates a new comment on a file. +func (r *CommentsService) Create(fileId string, comment *Comment) *CommentsCreateCall { + c := &CommentsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.comment = comment + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CommentsCreateCall) Fields(s ...googleapi.Field) *CommentsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CommentsCreateCall) Context(ctx context.Context) *CommentsCreateCall { + c.ctx_ = ctx + return c +} + +func (c *CommentsCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.comments.create" call. +// Exactly one of *Comment or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Comment.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *CommentsCreateCall) Do(opts ...googleapi.CallOption) (*Comment, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Comment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new comment on a file.", + // "httpMethod": "POST", + // "id": "drive.comments.create", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments", + // "request": { + // "$ref": "Comment" + // }, + // "response": { + // "$ref": "Comment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.comments.delete": + +type CommentsDeleteCall struct { + s *Service + fileId string + commentId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes a comment. +func (r *CommentsService) Delete(fileId string, commentId string) *CommentsDeleteCall { + c := &CommentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CommentsDeleteCall) Fields(s ...googleapi.Field) *CommentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CommentsDeleteCall) Context(ctx context.Context) *CommentsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *CommentsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.comments.delete" call. +func (c *CommentsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes a comment.", + // "httpMethod": "DELETE", + // "id": "drive.comments.delete", + // "parameterOrder": [ + // "fileId", + // "commentId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}", + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.comments.get": + +type CommentsGetCall struct { + s *Service + fileId string + commentId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets a comment by ID. +func (r *CommentsService) Get(fileId string, commentId string) *CommentsGetCall { + c := &CommentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + return c +} + +// IncludeDeleted sets the optional parameter "includeDeleted": Whether +// to return deleted comments. Deleted comments will not include their +// original content. +func (c *CommentsGetCall) IncludeDeleted(includeDeleted bool) *CommentsGetCall { + c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CommentsGetCall) Fields(s ...googleapi.Field) *CommentsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *CommentsGetCall) IfNoneMatch(entityTag string) *CommentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CommentsGetCall) Context(ctx context.Context) *CommentsGetCall { + c.ctx_ = ctx + return c +} + +func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.comments.get" call. +// Exactly one of *Comment or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Comment.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *CommentsGetCall) Do(opts ...googleapi.CallOption) (*Comment, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Comment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a comment by ID.", + // "httpMethod": "GET", + // "id": "drive.comments.get", + // "parameterOrder": [ + // "fileId", + // "commentId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "includeDeleted": { + // "default": "false", + // "description": "Whether to return deleted comments. Deleted comments will not include their original content.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}", + // "response": { + // "$ref": "Comment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.comments.list": + +type CommentsListCall struct { + s *Service + fileId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists a file's comments. +func (r *CommentsService) List(fileId string) *CommentsListCall { + c := &CommentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + return c +} + +// IncludeDeleted sets the optional parameter "includeDeleted": Whether +// to include deleted comments. Deleted comments will not include their +// original content. +func (c *CommentsListCall) IncludeDeleted(includeDeleted bool) *CommentsListCall { + c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted)) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of comments to return per page. +func (c *CommentsListCall) PageSize(pageSize int64) *CommentsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The token for +// continuing a previous list request on the next page. This should be +// set to the value of 'nextPageToken' from the previous response. +func (c *CommentsListCall) PageToken(pageToken string) *CommentsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// StartModifiedTime sets the optional parameter "startModifiedTime": +// The minimum value of 'modifiedTime' for the result comments (RFC 3339 +// date-time). 
+func (c *CommentsListCall) StartModifiedTime(startModifiedTime string) *CommentsListCall { + c.urlParams_.Set("startModifiedTime", startModifiedTime) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CommentsListCall) Fields(s ...googleapi.Field) *CommentsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *CommentsListCall) IfNoneMatch(entityTag string) *CommentsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CommentsListCall) Context(ctx context.Context) *CommentsListCall { + c.ctx_ = ctx + return c +} + +func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.comments.list" call. +// Exactly one of *CommentList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *CommentList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *CommentsListCall) Do(opts ...googleapi.CallOption) (*CommentList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &CommentList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists a file's comments.", + // "httpMethod": "GET", + // "id": "drive.comments.list", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "includeDeleted": { + // "default": "false", + // "description": "Whether to include deleted comments. 
Deleted comments will not include their original content.", + // "location": "query", + // "type": "boolean" + // }, + // "pageSize": { + // "default": "20", + // "description": "The maximum number of comments to return per page.", + // "format": "int32", + // "location": "query", + // "maximum": "100", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", + // "location": "query", + // "type": "string" + // }, + // "startModifiedTime": { + // "description": "The minimum value of 'modifiedTime' for the result comments (RFC 3339 date-time).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments", + // "response": { + // "$ref": "CommentList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *CommentsListCall) Pages(ctx context.Context, f func(*CommentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "drive.comments.update": + +type CommentsUpdateCall struct { + s *Service + fileId string + commentId string + comment *Comment + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a comment with patch semantics. +func (r *CommentsService) Update(fileId string, commentId string, comment *Comment) *CommentsUpdateCall { + c := &CommentsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + c.comment = comment + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CommentsUpdateCall) Fields(s ...googleapi.Field) *CommentsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CommentsUpdateCall) Context(ctx context.Context) *CommentsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *CommentsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.comments.update" call. 
+// Exactly one of *Comment or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Comment.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *CommentsUpdateCall) Do(opts ...googleapi.CallOption) (*Comment, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Comment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a comment with patch semantics.", + // "httpMethod": "PATCH", + // "id": "drive.comments.update", + // "parameterOrder": [ + // "fileId", + // "commentId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}", + // "request": { + // "$ref": "Comment" + // }, + // "response": { + // "$ref": "Comment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.files.copy": + +type FilesCopyCall struct { + s *Service + fileId string + file *File + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Copy: Creates a copy of a file and applies any requested updates with +// patch semantics. +func (r *FilesService) Copy(fileId string, file *File) *FilesCopyCall { + c := &FilesCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.file = file + return c +} + +// IgnoreDefaultVisibility sets the optional parameter +// "ignoreDefaultVisibility": Whether to ignore the domain's default +// visibility settings for the created file. Domain administrators can +// choose to make all uploaded files visible to the domain by default; +// this parameter bypasses that behavior for the request. Permissions +// are still inherited from parent folders. +func (c *FilesCopyCall) IgnoreDefaultVisibility(ignoreDefaultVisibility bool) *FilesCopyCall { + c.urlParams_.Set("ignoreDefaultVisibility", fmt.Sprint(ignoreDefaultVisibility)) + return c +} + +// KeepRevisionForever sets the optional parameter +// "keepRevisionForever": Whether to set the 'keepForever' field in the +// new head revision. This is only applicable to files with binary +// content in Drive. +func (c *FilesCopyCall) KeepRevisionForever(keepRevisionForever bool) *FilesCopyCall { + c.urlParams_.Set("keepRevisionForever", fmt.Sprint(keepRevisionForever)) + return c +} + +// OcrLanguage sets the optional parameter "ocrLanguage": A language +// hint for OCR processing during image import (ISO 639-1 code). 
+func (c *FilesCopyCall) OcrLanguage(ocrLanguage string) *FilesCopyCall { + c.urlParams_.Set("ocrLanguage", ocrLanguage) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesCopyCall) Fields(s ...googleapi.Field) *FilesCopyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FilesCopyCall) Context(ctx context.Context) *FilesCopyCall { + c.ctx_ = ctx + return c +} + +func (c *FilesCopyCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/copy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.files.copy" call. +// Exactly one of *File or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *File.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *FilesCopyCall) Do(opts ...googleapi.CallOption) (*File, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &File{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a copy of a file and applies any requested updates with patch semantics.", + // "httpMethod": "POST", + // "id": "drive.files.copy", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ignoreDefaultVisibility": { + // "default": "false", + // "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.", + // "location": "query", + // "type": "boolean" + // }, + // "keepRevisionForever": { + // "default": "false", + // "description": "Whether to set the 'keepForever' field in the new head revision. 
This is only applicable to files with binary content in Drive.", + // "location": "query", + // "type": "boolean" + // }, + // "ocrLanguage": { + // "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "files/{fileId}/copy", + // "request": { + // "$ref": "File" + // }, + // "response": { + // "$ref": "File" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.photos.readonly" + // ] + // } + +} + +// method id "drive.files.create": + +type FilesCreateCall struct { + s *Service + file *File + urlParams_ gensupport.URLParams + media_ io.Reader + resumableBuffer_ *gensupport.ResumableBuffer + mediaType_ string + mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater_ googleapi.ProgressUpdater + ctx_ context.Context +} + +// Create: Creates a new file. +func (r *FilesService) Create(file *File) *FilesCreateCall { + c := &FilesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.file = file + return c +} + +// IgnoreDefaultVisibility sets the optional parameter +// "ignoreDefaultVisibility": Whether to ignore the domain's default +// visibility settings for the created file. Domain administrators can +// choose to make all uploaded files visible to the domain by default; +// this parameter bypasses that behavior for the request. Permissions +// are still inherited from parent folders. +func (c *FilesCreateCall) IgnoreDefaultVisibility(ignoreDefaultVisibility bool) *FilesCreateCall { + c.urlParams_.Set("ignoreDefaultVisibility", fmt.Sprint(ignoreDefaultVisibility)) + return c +} + +// KeepRevisionForever sets the optional parameter +// "keepRevisionForever": Whether to set the 'keepForever' field in the +// new head revision. This is only applicable to files with binary +// content in Drive. +func (c *FilesCreateCall) KeepRevisionForever(keepRevisionForever bool) *FilesCreateCall { + c.urlParams_.Set("keepRevisionForever", fmt.Sprint(keepRevisionForever)) + return c +} + +// OcrLanguage sets the optional parameter "ocrLanguage": A language +// hint for OCR processing during image import (ISO 639-1 code). +func (c *FilesCreateCall) OcrLanguage(ocrLanguage string) *FilesCreateCall { + c.urlParams_.Set("ocrLanguage", ocrLanguage) + return c +} + +// UseContentAsIndexableText sets the optional parameter +// "useContentAsIndexableText": Whether to use the uploaded content as +// indexable text. +func (c *FilesCreateCall) UseContentAsIndexableText(useContentAsIndexableText bool) *FilesCreateCall { + c.urlParams_.Set("useContentAsIndexableText", fmt.Sprint(useContentAsIndexableText)) + return c +} + +// Media specifies the media to upload in one or more chunks. The chunk +// size may be controlled by supplying a MediaOption generated by +// googleapi.ChunkSize. The chunk size defaults to +// googleapi.DefaultUploadChunkSize.The Content-Type header used in the +// upload request will be determined by sniffing the contents of r, +// unless a MediaOption generated by googleapi.ContentType is +// supplied. +// At most one of Media and ResumableMedia may be set. 
+func (c *FilesCreateCall) Media(r io.Reader, options ...googleapi.MediaOption) *FilesCreateCall { + opts := googleapi.ProcessMediaOptions(options) + chunkSize := opts.ChunkSize + if !opts.ForceEmptyContentType { + r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) + } + c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize) + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be +// canceled with ctx. +// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. +func (c *FilesCreateCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *FilesCreateCall { + c.ctx_ = ctx + rdr := gensupport.ReaderAtToReader(r, size) + rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) + c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize) + c.media_ = nil + c.mediaSize_ = size + return c +} + +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). +func (c *FilesCreateCall) ProgressUpdater(pu googleapi.ProgressUpdater) *FilesCreateCall { + c.progressUpdater_ = pu + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesCreateCall) Fields(s ...googleapi.Field) *FilesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. +func (c *FilesCreateCall) Context(ctx context.Context) *FilesCreateCall { + c.ctx_ = ctx + return c +} + +func (c *FilesCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files") + if c.media_ != nil || c.resumableBuffer_ != nil { + urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + protocol := "multipart" + if c.resumableBuffer_ != nil { + protocol = "resumable" + } + c.urlParams_.Set("uploadType", protocol) + } + urls += "?" + c.urlParams_.Encode() + if c.media_ != nil { + var combined io.ReadCloser + combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_) + defer combined.Close() + body = combined + } + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + if c.resumableBuffer_ != nil && c.mediaType_ != "" { + req.Header.Set("X-Upload-Content-Type", c.mediaType_) + } + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.files.create" call. 
+// Exactly one of *File or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *File.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *FilesCreateCall) Do(opts ...googleapi.CallOption) (*File, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + if c.resumableBuffer_ != nil { + loc := res.Header.Get("Location") + rx := &gensupport.ResumableUpload{ + Client: c.s.client, + UserAgent: c.s.userAgent(), + URI: loc, + Media: c.resumableBuffer_, + MediaType: c.mediaType_, + Callback: func(curr int64) { + if c.progressUpdater_ != nil { + c.progressUpdater_(curr, c.mediaSize_) + } + }, + } + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + } + ret := &File{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new file.", + // "httpMethod": "POST", + // "id": "drive.files.create", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "maxSize": "5120GB", + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": "/resumable/upload/drive/v3/files" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/drive/v3/files" + // } + // } + // }, + // "parameters": { + // "ignoreDefaultVisibility": { + // "default": "false", + // "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.", + // "location": "query", + // "type": "boolean" + // }, + // "keepRevisionForever": { + // "default": "false", + // "description": "Whether to set the 'keepForever' field in the new head revision. 
This is only applicable to files with binary content in Drive.", + // "location": "query", + // "type": "boolean" + // }, + // "ocrLanguage": { + // "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + // "location": "query", + // "type": "string" + // }, + // "useContentAsIndexableText": { + // "default": "false", + // "description": "Whether to use the uploaded content as indexable text.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "files", + // "request": { + // "$ref": "File" + // }, + // "response": { + // "$ref": "File" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file" + // ], + // "supportsMediaUpload": true, + // "supportsSubscription": true + // } + +} + +// method id "drive.files.delete": + +type FilesDeleteCall struct { + s *Service + fileId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Permanently deletes a file owned by the user without moving +// it to the trash. If the target is a folder, all descendants owned by +// the user are also deleted. +func (r *FilesService) Delete(fileId string) *FilesDeleteCall { + c := &FilesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesDeleteCall) Fields(s ...googleapi.Field) *FilesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FilesDeleteCall) Context(ctx context.Context) *FilesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *FilesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.files.delete" call. +func (c *FilesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes a file owned by the user without moving it to the trash. 
If the target is a folder, all descendants owned by the user are also deleted.", + // "httpMethod": "DELETE", + // "id": "drive.files.delete", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}", + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.files.emptyTrash": + +type FilesEmptyTrashCall struct { + s *Service + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// EmptyTrash: Permanently deletes all of the user's trashed files. +func (r *FilesService) EmptyTrash() *FilesEmptyTrashCall { + c := &FilesEmptyTrashCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesEmptyTrashCall) Fields(s ...googleapi.Field) *FilesEmptyTrashCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FilesEmptyTrashCall) Context(ctx context.Context) *FilesEmptyTrashCall { + c.ctx_ = ctx + return c +} + +func (c *FilesEmptyTrashCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/trash") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.files.emptyTrash" call. +func (c *FilesEmptyTrashCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes all of the user's trashed files.", + // "httpMethod": "DELETE", + // "id": "drive.files.emptyTrash", + // "path": "files/trash", + // "scopes": [ + // "https://www.googleapis.com/auth/drive" + // ] + // } + +} + +// method id "drive.files.export": + +type FilesExportCall struct { + s *Service + fileId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Export: Exports a Google Doc to the requested MIME type and returns +// the exported content. +func (r *FilesService) Export(fileId string, mimeType string) *FilesExportCall { + c := &FilesExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.urlParams_.Set("mimeType", mimeType) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesExportCall) Fields(s ...googleapi.Field) *FilesExportCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FilesExportCall) IfNoneMatch(entityTag string) *FilesExportCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *FilesExportCall) Context(ctx context.Context) *FilesExportCall { + c.ctx_ = ctx + return c +} + +func (c *FilesExportCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/export") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *FilesExportCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "drive.files.export" call. +func (c *FilesExportCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Exports a Google Doc to the requested MIME type and returns the exported content.", + // "httpMethod": "GET", + // "id": "drive.files.export", + // "parameterOrder": [ + // "fileId", + // "mimeType" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "mimeType": { + // "description": "The MIME type of the format requested for this export.", + // "location": "query", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/export", + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.readonly" + // ], + // "supportsMediaDownload": true + // } + +} + +// method id "drive.files.generateIds": + +type FilesGenerateIdsCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// GenerateIds: Generates a set of file IDs which can be provided in +// create requests. +func (r *FilesService) GenerateIds() *FilesGenerateIdsCall { + c := &FilesGenerateIdsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Count sets the optional parameter "count": The number of IDs to +// return. 
+func (c *FilesGenerateIdsCall) Count(count int64) *FilesGenerateIdsCall { + c.urlParams_.Set("count", fmt.Sprint(count)) + return c +} + +// Space sets the optional parameter "space": The space in which the IDs +// can be used to create new files. Supported values are 'drive' and +// 'appDataFolder'. +func (c *FilesGenerateIdsCall) Space(space string) *FilesGenerateIdsCall { + c.urlParams_.Set("space", space) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesGenerateIdsCall) Fields(s ...googleapi.Field) *FilesGenerateIdsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FilesGenerateIdsCall) IfNoneMatch(entityTag string) *FilesGenerateIdsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FilesGenerateIdsCall) Context(ctx context.Context) *FilesGenerateIdsCall { + c.ctx_ = ctx + return c +} + +func (c *FilesGenerateIdsCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/generateIds") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.files.generateIds" call. +// Exactly one of *GeneratedIds or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *GeneratedIds.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FilesGenerateIdsCall) Do(opts ...googleapi.CallOption) (*GeneratedIds, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GeneratedIds{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Generates a set of file IDs which can be provided in create requests.", + // "httpMethod": "GET", + // "id": "drive.files.generateIds", + // "parameters": { + // "count": { + // "default": "10", + // "description": "The number of IDs to return.", + // "format": "int32", + // "location": "query", + // "maximum": "1000", + // "minimum": "1", + // "type": "integer" + // }, + // "space": { + // "default": "drive", + // "description": "The space in which the IDs can be used to create new files. Supported values are 'drive' and 'appDataFolder'.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "files/generateIds", + // "response": { + // "$ref": "GeneratedIds" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.files.get": + +type FilesGetCall struct { + s *Service + fileId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets a file's metadata or content by ID. +func (r *FilesService) Get(fileId string) *FilesGetCall { + c := &FilesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + return c +} + +// AcknowledgeAbuse sets the optional parameter "acknowledgeAbuse": +// Whether the user is acknowledging the risk of downloading known +// malware or other abusive files. This is only applicable when +// alt=media. +func (c *FilesGetCall) AcknowledgeAbuse(acknowledgeAbuse bool) *FilesGetCall { + c.urlParams_.Set("acknowledgeAbuse", fmt.Sprint(acknowledgeAbuse)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesGetCall) Fields(s ...googleapi.Field) *FilesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FilesGetCall) IfNoneMatch(entityTag string) *FilesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *FilesGetCall) Context(ctx context.Context) *FilesGetCall { + c.ctx_ = ctx + return c +} + +func (c *FilesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *FilesGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "drive.files.get" call. +// Exactly one of *File or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *File.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *FilesGetCall) Do(opts ...googleapi.CallOption) (*File, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &File{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a file's metadata or content by ID.", + // "httpMethod": "GET", + // "id": "drive.files.get", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "acknowledgeAbuse": { + // "default": "false", + // "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.", + // "location": "query", + // "type": "boolean" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}", + // "response": { + // "$ref": "File" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ], + // "supportsMediaDownload": true, + // "supportsSubscription": true, + // "useMediaDownloadService": true + // } + +} + +// method id "drive.files.list": + +type FilesListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists or searches files. 
+func (r *FilesService) List() *FilesListCall { + c := &FilesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Corpus sets the optional parameter "corpus": The source of files to +// list. +// +// Possible values: +// "domain" - Files shared to the user's domain. +// "user" (default) - Files owned by or shared to the user. +func (c *FilesListCall) Corpus(corpus string) *FilesListCall { + c.urlParams_.Set("corpus", corpus) + return c +} + +// OrderBy sets the optional parameter "orderBy": A comma-separated list +// of sort keys. Valid keys are 'createdTime', 'folder', +// 'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', +// 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each +// key sorts ascending by default, but may be reversed with the 'desc' +// modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. +// Please note that there is a current limitation for users with +// approximately one million files in which the requested sort order is +// ignored. +func (c *FilesListCall) OrderBy(orderBy string) *FilesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of files to return per page. +func (c *FilesListCall) PageSize(pageSize int64) *FilesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The token for +// continuing a previous list request on the next page. This should be +// set to the value of 'nextPageToken' from the previous response. +func (c *FilesListCall) PageToken(pageToken string) *FilesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Q sets the optional parameter "q": A query for filtering the file +// results. See the "Search for Files" guide for supported syntax. +func (c *FilesListCall) Q(q string) *FilesListCall { + c.urlParams_.Set("q", q) + return c +} + +// Spaces sets the optional parameter "spaces": A comma-separated list +// of spaces to query within the corpus. Supported values are 'drive', +// 'appDataFolder' and 'photos'. +func (c *FilesListCall) Spaces(spaces string) *FilesListCall { + c.urlParams_.Set("spaces", spaces) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesListCall) Fields(s ...googleapi.Field) *FilesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FilesListCall) IfNoneMatch(entityTag string) *FilesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FilesListCall) Context(ctx context.Context) *FilesListCall { + c.ctx_ = ctx + return c +} + +func (c *FilesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.files.list" call. +// Exactly one of *FileList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *FileList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FilesListCall) Do(opts ...googleapi.CallOption) (*FileList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &FileList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists or searches files.", + // "httpMethod": "GET", + // "id": "drive.files.list", + // "parameters": { + // "corpus": { + // "default": "user", + // "description": "The source of files to list.", + // "enum": [ + // "domain", + // "user" + // ], + // "enumDescriptions": [ + // "Files shared to the user's domain.", + // "Files owned by or shared to the user." + // ], + // "location": "query", + // "type": "string" + // }, + // "orderBy": { + // "description": "A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one million files in which the requested sort order is ignored.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "default": "100", + // "description": "The maximum number of files to return per page.", + // "format": "int32", + // "location": "query", + // "maximum": "1000", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", + // "location": "query", + // "type": "string" + // }, + // "q": { + // "description": "A query for filtering the file results. See the \"Search for Files\" guide for supported syntax.", + // "location": "query", + // "type": "string" + // }, + // "spaces": { + // "default": "drive", + // "description": "A comma-separated list of spaces to query within the corpus. 
Supported values are 'drive', 'appDataFolder' and 'photos'.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "files", + // "response": { + // "$ref": "FileList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *FilesListCall) Pages(ctx context.Context, f func(*FileList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "drive.files.update": + +type FilesUpdateCall struct { + s *Service + fileId string + file *File + urlParams_ gensupport.URLParams + media_ io.Reader + resumableBuffer_ *gensupport.ResumableBuffer + mediaType_ string + mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater_ googleapi.ProgressUpdater + ctx_ context.Context +} + +// Update: Updates a file's metadata and/or content with patch +// semantics. +func (r *FilesService) Update(fileId string, file *File) *FilesUpdateCall { + c := &FilesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.file = file + return c +} + +// AddParents sets the optional parameter "addParents": A +// comma-separated list of parent IDs to add. +func (c *FilesUpdateCall) AddParents(addParents string) *FilesUpdateCall { + c.urlParams_.Set("addParents", addParents) + return c +} + +// KeepRevisionForever sets the optional parameter +// "keepRevisionForever": Whether to set the 'keepForever' field in the +// new head revision. This is only applicable to files with binary +// content in Drive. +func (c *FilesUpdateCall) KeepRevisionForever(keepRevisionForever bool) *FilesUpdateCall { + c.urlParams_.Set("keepRevisionForever", fmt.Sprint(keepRevisionForever)) + return c +} + +// OcrLanguage sets the optional parameter "ocrLanguage": A language +// hint for OCR processing during image import (ISO 639-1 code). +func (c *FilesUpdateCall) OcrLanguage(ocrLanguage string) *FilesUpdateCall { + c.urlParams_.Set("ocrLanguage", ocrLanguage) + return c +} + +// RemoveParents sets the optional parameter "removeParents": A +// comma-separated list of parent IDs to remove. +func (c *FilesUpdateCall) RemoveParents(removeParents string) *FilesUpdateCall { + c.urlParams_.Set("removeParents", removeParents) + return c +} + +// UseContentAsIndexableText sets the optional parameter +// "useContentAsIndexableText": Whether to use the uploaded content as +// indexable text. +func (c *FilesUpdateCall) UseContentAsIndexableText(useContentAsIndexableText bool) *FilesUpdateCall { + c.urlParams_.Set("useContentAsIndexableText", fmt.Sprint(useContentAsIndexableText)) + return c +} + +// Media specifies the media to upload in one or more chunks. 
The chunk +// size may be controlled by supplying a MediaOption generated by +// googleapi.ChunkSize. The chunk size defaults to +// googleapi.DefaultUploadChunkSize.The Content-Type header used in the +// upload request will be determined by sniffing the contents of r, +// unless a MediaOption generated by googleapi.ContentType is +// supplied. +// At most one of Media and ResumableMedia may be set. +func (c *FilesUpdateCall) Media(r io.Reader, options ...googleapi.MediaOption) *FilesUpdateCall { + opts := googleapi.ProcessMediaOptions(options) + chunkSize := opts.ChunkSize + if !opts.ForceEmptyContentType { + r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) + } + c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize) + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be +// canceled with ctx. +// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. +func (c *FilesUpdateCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *FilesUpdateCall { + c.ctx_ = ctx + rdr := gensupport.ReaderAtToReader(r, size) + rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) + c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize) + c.media_ = nil + c.mediaSize_ = size + return c +} + +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). +func (c *FilesUpdateCall) ProgressUpdater(pu googleapi.ProgressUpdater) *FilesUpdateCall { + c.progressUpdater_ = pu + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesUpdateCall) Fields(s ...googleapi.Field) *FilesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. +func (c *FilesUpdateCall) Context(ctx context.Context) *FilesUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *FilesUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}") + if c.media_ != nil || c.resumableBuffer_ != nil { + urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + protocol := "multipart" + if c.resumableBuffer_ != nil { + protocol = "resumable" + } + c.urlParams_.Set("uploadType", protocol) + } + urls += "?" 
+ c.urlParams_.Encode() + if c.media_ != nil { + var combined io.ReadCloser + combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_) + defer combined.Close() + body = combined + } + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + if c.resumableBuffer_ != nil && c.mediaType_ != "" { + req.Header.Set("X-Upload-Content-Type", c.mediaType_) + } + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.files.update" call. +// Exactly one of *File or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *File.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *FilesUpdateCall) Do(opts ...googleapi.CallOption) (*File, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + if c.resumableBuffer_ != nil { + loc := res.Header.Get("Location") + rx := &gensupport.ResumableUpload{ + Client: c.s.client, + UserAgent: c.s.userAgent(), + URI: loc, + Media: c.resumableBuffer_, + MediaType: c.mediaType_, + Callback: func(curr int64) { + if c.progressUpdater_ != nil { + c.progressUpdater_(curr, c.mediaSize_) + } + }, + } + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + } + ret := &File{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a file's metadata and/or content with patch semantics.", + // "httpMethod": "PATCH", + // "id": "drive.files.update", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "maxSize": "5120GB", + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": "/resumable/upload/drive/v3/files/{fileId}" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/drive/v3/files/{fileId}" + // } + // } + // }, + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "addParents": { + // "description": "A comma-separated list of parent IDs to add.", + // "location": "query", + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keepRevisionForever": { + // "default": "false", + // "description": "Whether to set the 'keepForever' field in the new head revision. 
This is only applicable to files with binary content in Drive.", + // "location": "query", + // "type": "boolean" + // }, + // "ocrLanguage": { + // "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + // "location": "query", + // "type": "string" + // }, + // "removeParents": { + // "description": "A comma-separated list of parent IDs to remove.", + // "location": "query", + // "type": "string" + // }, + // "useContentAsIndexableText": { + // "default": "false", + // "description": "Whether to use the uploaded content as indexable text.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "files/{fileId}", + // "request": { + // "$ref": "File" + // }, + // "response": { + // "$ref": "File" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.scripts" + // ], + // "supportsMediaUpload": true + // } + +} + +// method id "drive.files.watch": + +type FilesWatchCall struct { + s *Service + fileId string + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Watch: Subscribes to changes to a file +func (r *FilesService) Watch(fileId string, channel *Channel) *FilesWatchCall { + c := &FilesWatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.channel = channel + return c +} + +// AcknowledgeAbuse sets the optional parameter "acknowledgeAbuse": +// Whether the user is acknowledging the risk of downloading known +// malware or other abusive files. This is only applicable when +// alt=media. +func (c *FilesWatchCall) AcknowledgeAbuse(acknowledgeAbuse bool) *FilesWatchCall { + c.urlParams_.Set("acknowledgeAbuse", fmt.Sprint(acknowledgeAbuse)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesWatchCall) Fields(s ...googleapi.Field) *FilesWatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *FilesWatchCall) Context(ctx context.Context) *FilesWatchCall { + c.ctx_ = ctx + return c +} + +func (c *FilesWatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/watch") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. 
+func (c *FilesWatchCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "drive.files.watch" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FilesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Subscribes to changes to a file", + // "httpMethod": "POST", + // "id": "drive.files.watch", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "acknowledgeAbuse": { + // "default": "false", + // "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.", + // "location": "query", + // "type": "boolean" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/watch", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "response": { + // "$ref": "Channel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ], + // "supportsMediaDownload": true, + // "supportsSubscription": true, + // "useMediaDownloadService": true + // } + +} + +// method id "drive.permissions.create": + +type PermissionsCreateCall struct { + s *Service + fileId string + permission *Permission + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Creates a permission for a file. +func (r *PermissionsService) Create(fileId string, permission *Permission) *PermissionsCreateCall { + c := &PermissionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.permission = permission + return c +} + +// EmailMessage sets the optional parameter "emailMessage": A custom +// message to include in the notification email. 
+func (c *PermissionsCreateCall) EmailMessage(emailMessage string) *PermissionsCreateCall { + c.urlParams_.Set("emailMessage", emailMessage) + return c +} + +// SendNotificationEmail sets the optional parameter +// "sendNotificationEmail": Whether to send a notification email when +// sharing to users or groups. This defaults to true for users and +// groups, and is not allowed for other requests. It must not be +// disabled for ownership transfers. +func (c *PermissionsCreateCall) SendNotificationEmail(sendNotificationEmail bool) *PermissionsCreateCall { + c.urlParams_.Set("sendNotificationEmail", fmt.Sprint(sendNotificationEmail)) + return c +} + +// TransferOwnership sets the optional parameter "transferOwnership": +// Whether to transfer ownership to the specified user and downgrade the +// current owner to a writer. This parameter is required as an +// acknowledgement of the side effect. +func (c *PermissionsCreateCall) TransferOwnership(transferOwnership bool) *PermissionsCreateCall { + c.urlParams_.Set("transferOwnership", fmt.Sprint(transferOwnership)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PermissionsCreateCall) Fields(s ...googleapi.Field) *PermissionsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PermissionsCreateCall) Context(ctx context.Context) *PermissionsCreateCall { + c.ctx_ = ctx + return c +} + +func (c *PermissionsCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.permissions.create" call. +// Exactly one of *Permission or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Permission.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PermissionsCreateCall) Do(opts ...googleapi.CallOption) (*Permission, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Permission{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a permission for a file.", + // "httpMethod": "POST", + // "id": "drive.permissions.create", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "emailMessage": { + // "description": "A custom message to include in the notification email.", + // "location": "query", + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sendNotificationEmail": { + // "description": "Whether to send a notification email when sharing to users or groups. This defaults to true for users and groups, and is not allowed for other requests. It must not be disabled for ownership transfers.", + // "location": "query", + // "type": "boolean" + // }, + // "transferOwnership": { + // "default": "false", + // "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. This parameter is required as an acknowledgement of the side effect.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "files/{fileId}/permissions", + // "request": { + // "$ref": "Permission" + // }, + // "response": { + // "$ref": "Permission" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.permissions.delete": + +type PermissionsDeleteCall struct { + s *Service + fileId string + permissionId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes a permission. +func (r *PermissionsService) Delete(fileId string, permissionId string) *PermissionsDeleteCall { + c := &PermissionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.permissionId = permissionId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PermissionsDeleteCall) Fields(s ...googleapi.Field) *PermissionsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PermissionsDeleteCall) Context(ctx context.Context) *PermissionsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *PermissionsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "permissionId": c.permissionId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.permissions.delete" call. +func (c *PermissionsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes a permission.", + // "httpMethod": "DELETE", + // "id": "drive.permissions.delete", + // "parameterOrder": [ + // "fileId", + // "permissionId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissionId": { + // "description": "The ID of the permission.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/permissions/{permissionId}", + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.permissions.get": + +type PermissionsGetCall struct { + s *Service + fileId string + permissionId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets a permission by ID. +func (r *PermissionsService) Get(fileId string, permissionId string) *PermissionsGetCall { + c := &PermissionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.permissionId = permissionId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PermissionsGetCall) Fields(s ...googleapi.Field) *PermissionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *PermissionsGetCall) IfNoneMatch(entityTag string) *PermissionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PermissionsGetCall) Context(ctx context.Context) *PermissionsGetCall { + c.ctx_ = ctx + return c +} + +func (c *PermissionsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "permissionId": c.permissionId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.permissions.get" call. +// Exactly one of *Permission or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Permission.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PermissionsGetCall) Do(opts ...googleapi.CallOption) (*Permission, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Permission{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a permission by ID.", + // "httpMethod": "GET", + // "id": "drive.permissions.get", + // "parameterOrder": [ + // "fileId", + // "permissionId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissionId": { + // "description": "The ID of the permission.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/permissions/{permissionId}", + // "response": { + // "$ref": "Permission" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.permissions.list": + +type PermissionsListCall struct { + s *Service + fileId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists a file's permissions. +func (r *PermissionsService) List(fileId string) *PermissionsListCall { + c := &PermissionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PermissionsListCall) Fields(s ...googleapi.Field) *PermissionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *PermissionsListCall) IfNoneMatch(entityTag string) *PermissionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PermissionsListCall) Context(ctx context.Context) *PermissionsListCall { + c.ctx_ = ctx + return c +} + +func (c *PermissionsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.permissions.list" call. +// Exactly one of *PermissionList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *PermissionList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PermissionsListCall) Do(opts ...googleapi.CallOption) (*PermissionList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &PermissionList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists a file's permissions.", + // "httpMethod": "GET", + // "id": "drive.permissions.list", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/permissions", + // "response": { + // "$ref": "PermissionList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.permissions.update": + +type PermissionsUpdateCall struct { + s *Service + fileId string + permissionId string + permission *Permission + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a permission with patch semantics. 
+func (r *PermissionsService) Update(fileId string, permissionId string, permission *Permission) *PermissionsUpdateCall { + c := &PermissionsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.permissionId = permissionId + c.permission = permission + return c +} + +// TransferOwnership sets the optional parameter "transferOwnership": +// Whether to transfer ownership to the specified user and downgrade the +// current owner to a writer. This parameter is required as an +// acknowledgement of the side effect. +func (c *PermissionsUpdateCall) TransferOwnership(transferOwnership bool) *PermissionsUpdateCall { + c.urlParams_.Set("transferOwnership", fmt.Sprint(transferOwnership)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PermissionsUpdateCall) Fields(s ...googleapi.Field) *PermissionsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PermissionsUpdateCall) Context(ctx context.Context) *PermissionsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *PermissionsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "permissionId": c.permissionId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.permissions.update" call. +// Exactly one of *Permission or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Permission.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PermissionsUpdateCall) Do(opts ...googleapi.CallOption) (*Permission, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Permission{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a permission with patch semantics.", + // "httpMethod": "PATCH", + // "id": "drive.permissions.update", + // "parameterOrder": [ + // "fileId", + // "permissionId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissionId": { + // "description": "The ID of the permission.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "transferOwnership": { + // "default": "false", + // "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. This parameter is required as an acknowledgement of the side effect.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "files/{fileId}/permissions/{permissionId}", + // "request": { + // "$ref": "Permission" + // }, + // "response": { + // "$ref": "Permission" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.replies.create": + +type RepliesCreateCall struct { + s *Service + fileId string + commentId string + reply *Reply + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Creates a new reply to a comment. +func (r *RepliesService) Create(fileId string, commentId string, reply *Reply) *RepliesCreateCall { + c := &RepliesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + c.reply = reply + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RepliesCreateCall) Fields(s ...googleapi.Field) *RepliesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RepliesCreateCall) Context(ctx context.Context) *RepliesCreateCall { + c.ctx_ = ctx + return c +} + +func (c *RepliesCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.reply) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.replies.create" call. +// Exactly one of *Reply or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Reply.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RepliesCreateCall) Do(opts ...googleapi.CallOption) (*Reply, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Reply{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new reply to a comment.", + // "httpMethod": "POST", + // "id": "drive.replies.create", + // "parameterOrder": [ + // "fileId", + // "commentId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}/replies", + // "request": { + // "$ref": "Reply" + // }, + // "response": { + // "$ref": "Reply" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.replies.delete": + +type RepliesDeleteCall struct { + s *Service + fileId string + commentId string + replyId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes a reply. +func (r *RepliesService) Delete(fileId string, commentId string, replyId string) *RepliesDeleteCall { + c := &RepliesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + c.replyId = replyId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RepliesDeleteCall) Fields(s ...googleapi.Field) *RepliesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *RepliesDeleteCall) Context(ctx context.Context) *RepliesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *RepliesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + "replyId": c.replyId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.replies.delete" call. +func (c *RepliesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes a reply.", + // "httpMethod": "DELETE", + // "id": "drive.replies.delete", + // "parameterOrder": [ + // "fileId", + // "commentId", + // "replyId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "replyId": { + // "description": "The ID of the reply.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.replies.get": + +type RepliesGetCall struct { + s *Service + fileId string + commentId string + replyId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets a reply by ID. +func (r *RepliesService) Get(fileId string, commentId string, replyId string) *RepliesGetCall { + c := &RepliesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + c.replyId = replyId + return c +} + +// IncludeDeleted sets the optional parameter "includeDeleted": Whether +// to return deleted replies. Deleted replies will not include their +// original content. +func (c *RepliesGetCall) IncludeDeleted(includeDeleted bool) *RepliesGetCall { + c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RepliesGetCall) Fields(s ...googleapi.Field) *RepliesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *RepliesGetCall) IfNoneMatch(entityTag string) *RepliesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RepliesGetCall) Context(ctx context.Context) *RepliesGetCall { + c.ctx_ = ctx + return c +} + +func (c *RepliesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + "replyId": c.replyId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.replies.get" call. +// Exactly one of *Reply or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Reply.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RepliesGetCall) Do(opts ...googleapi.CallOption) (*Reply, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Reply{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a reply by ID.", + // "httpMethod": "GET", + // "id": "drive.replies.get", + // "parameterOrder": [ + // "fileId", + // "commentId", + // "replyId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "includeDeleted": { + // "default": "false", + // "description": "Whether to return deleted replies. 
Deleted replies will not include their original content.", + // "location": "query", + // "type": "boolean" + // }, + // "replyId": { + // "description": "The ID of the reply.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + // "response": { + // "$ref": "Reply" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.replies.list": + +type RepliesListCall struct { + s *Service + fileId string + commentId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists a comment's replies. +func (r *RepliesService) List(fileId string, commentId string) *RepliesListCall { + c := &RepliesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + return c +} + +// IncludeDeleted sets the optional parameter "includeDeleted": Whether +// to include deleted replies. Deleted replies will not include their +// original content. +func (c *RepliesListCall) IncludeDeleted(includeDeleted bool) *RepliesListCall { + c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted)) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of replies to return per page. +func (c *RepliesListCall) PageSize(pageSize int64) *RepliesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The token for +// continuing a previous list request on the next page. This should be +// set to the value of 'nextPageToken' from the previous response. +func (c *RepliesListCall) PageToken(pageToken string) *RepliesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RepliesListCall) Fields(s ...googleapi.Field) *RepliesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RepliesListCall) IfNoneMatch(entityTag string) *RepliesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RepliesListCall) Context(ctx context.Context) *RepliesListCall { + c.ctx_ = ctx + return c +} + +func (c *RepliesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.replies.list" call. +// Exactly one of *ReplyList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ReplyList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RepliesListCall) Do(opts ...googleapi.CallOption) (*ReplyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ReplyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists a comment's replies.", + // "httpMethod": "GET", + // "id": "drive.replies.list", + // "parameterOrder": [ + // "fileId", + // "commentId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "includeDeleted": { + // "default": "false", + // "description": "Whether to include deleted replies. Deleted replies will not include their original content.", + // "location": "query", + // "type": "boolean" + // }, + // "pageSize": { + // "default": "20", + // "description": "The maximum number of replies to return per page.", + // "format": "int32", + // "location": "query", + // "maximum": "100", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}/replies", + // "response": { + // "$ref": "ReplyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RepliesListCall) Pages(ctx context.Context, f func(*ReplyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "drive.replies.update": + +type RepliesUpdateCall struct { + s *Service + fileId string + commentId string + replyId string + reply *Reply + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a reply with patch semantics. +func (r *RepliesService) Update(fileId string, commentId string, replyId string, reply *Reply) *RepliesUpdateCall { + c := &RepliesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + c.replyId = replyId + c.reply = reply + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RepliesUpdateCall) Fields(s ...googleapi.Field) *RepliesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RepliesUpdateCall) Context(ctx context.Context) *RepliesUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *RepliesUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.reply) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + "replyId": c.replyId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.replies.update" call. +// Exactly one of *Reply or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Reply.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RepliesUpdateCall) Do(opts ...googleapi.CallOption) (*Reply, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Reply{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a reply with patch semantics.", + // "httpMethod": "PATCH", + // "id": "drive.replies.update", + // "parameterOrder": [ + // "fileId", + // "commentId", + // "replyId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "replyId": { + // "description": "The ID of the reply.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + // "request": { + // "$ref": "Reply" + // }, + // "response": { + // "$ref": "Reply" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.revisions.delete": + +type RevisionsDeleteCall struct { + s *Service + fileId string + revisionId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Permanently deletes a revision. This method is only +// applicable to files with binary content in Drive. +func (r *RevisionsService) Delete(fileId string, revisionId string) *RevisionsDeleteCall { + c := &RevisionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.revisionId = revisionId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RevisionsDeleteCall) Fields(s ...googleapi.Field) *RevisionsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RevisionsDeleteCall) Context(ctx context.Context) *RevisionsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *RevisionsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "revisionId": c.revisionId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.revisions.delete" call. +func (c *RevisionsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes a revision. This method is only applicable to files with binary content in Drive.", + // "httpMethod": "DELETE", + // "id": "drive.revisions.delete", + // "parameterOrder": [ + // "fileId", + // "revisionId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "revisionId": { + // "description": "The ID of the revision.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/revisions/{revisionId}", + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.revisions.get": + +type RevisionsGetCall struct { + s *Service + fileId string + revisionId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets a revision's metadata or content by ID. +func (r *RevisionsService) Get(fileId string, revisionId string) *RevisionsGetCall { + c := &RevisionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.revisionId = revisionId + return c +} + +// AcknowledgeAbuse sets the optional parameter "acknowledgeAbuse": +// Whether the user is acknowledging the risk of downloading known +// malware or other abusive files. This is only applicable when +// alt=media. +func (c *RevisionsGetCall) AcknowledgeAbuse(acknowledgeAbuse bool) *RevisionsGetCall { + c.urlParams_.Set("acknowledgeAbuse", fmt.Sprint(acknowledgeAbuse)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RevisionsGetCall) Fields(s ...googleapi.Field) *RevisionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RevisionsGetCall) IfNoneMatch(entityTag string) *RevisionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *RevisionsGetCall) Context(ctx context.Context) *RevisionsGetCall { + c.ctx_ = ctx + return c +} + +func (c *RevisionsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "revisionId": c.revisionId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *RevisionsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "drive.revisions.get" call. +// Exactly one of *Revision or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Revision.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RevisionsGetCall) Do(opts ...googleapi.CallOption) (*Revision, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Revision{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a revision's metadata or content by ID.", + // "httpMethod": "GET", + // "id": "drive.revisions.get", + // "parameterOrder": [ + // "fileId", + // "revisionId" + // ], + // "parameters": { + // "acknowledgeAbuse": { + // "default": "false", + // "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. 
This is only applicable when alt=media.", + // "location": "query", + // "type": "boolean" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "revisionId": { + // "description": "The ID of the revision.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/revisions/{revisionId}", + // "response": { + // "$ref": "Revision" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "drive.revisions.list": + +type RevisionsListCall struct { + s *Service + fileId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists a file's revisions. +func (r *RevisionsService) List(fileId string) *RevisionsListCall { + c := &RevisionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RevisionsListCall) Fields(s ...googleapi.Field) *RevisionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RevisionsListCall) IfNoneMatch(entityTag string) *RevisionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RevisionsListCall) Context(ctx context.Context) *RevisionsListCall { + c.ctx_ = ctx + return c +} + +func (c *RevisionsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.revisions.list" call. +// Exactly one of *RevisionList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RevisionList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *RevisionsListCall) Do(opts ...googleapi.CallOption) (*RevisionList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RevisionList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists a file's revisions.", + // "httpMethod": "GET", + // "id": "drive.revisions.list", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/revisions", + // "response": { + // "$ref": "RevisionList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.revisions.update": + +type RevisionsUpdateCall struct { + s *Service + fileId string + revisionId string + revision *Revision + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a revision with patch semantics. +func (r *RevisionsService) Update(fileId string, revisionId string, revision *Revision) *RevisionsUpdateCall { + c := &RevisionsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.revisionId = revisionId + c.revision = revision + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RevisionsUpdateCall) Fields(s ...googleapi.Field) *RevisionsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RevisionsUpdateCall) Context(ctx context.Context) *RevisionsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *RevisionsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.revision) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "revisionId": c.revisionId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.revisions.update" call. 
+// Exactly one of *Revision or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Revision.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RevisionsUpdateCall) Do(opts ...googleapi.CallOption) (*Revision, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Revision{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a revision with patch semantics.", + // "httpMethod": "PATCH", + // "id": "drive.revisions.update", + // "parameterOrder": [ + // "fileId", + // "revisionId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "revisionId": { + // "description": "The ID of the revision.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/revisions/{revisionId}", + // "request": { + // "$ref": "Revision" + // }, + // "response": { + // "$ref": "Revision" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/google.golang.org/api/gensupport/backoff.go new file mode 100644 index 00000000..13561404 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/backoff.go @@ -0,0 +1,46 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "math/rand" + "time" +) + +type BackoffStrategy interface { + // Pause returns the duration of the next pause and true if the operation should be + // retried, or false if no further retries should be attempted. + Pause() (time.Duration, bool) + + // Reset restores the strategy to its initial state. + Reset() +} + +// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff. +// The initial pause time is given by Base. +// Once the total pause time exceeds Max, Pause will indicate no further retries. +type ExponentialBackoff struct { + Base time.Duration + Max time.Duration + total time.Duration + n uint +} + +func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { + if eb.total > eb.Max { + return 0, false + } + + // The next pause is selected from randomly from [0, 2^n * Base). 
+ d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base))) + eb.total += d + eb.n++ + return d, true +} + +func (eb *ExponentialBackoff) Reset() { + eb.n = 0 + eb.total = 0 +} diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go new file mode 100644 index 00000000..4b8ec142 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/buffer.go @@ -0,0 +1,77 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "bytes" + "io" + + "google.golang.org/api/googleapi" +) + +// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks. +type ResumableBuffer struct { + media io.Reader + + chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. + err error // Any error generated when populating chunk by reading media. + + // The absolute position of chunk in the underlying media. + off int64 +} + +func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer { + return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)} +} + +// Chunk returns the current buffered chunk, the offset in the underlying media +// from which the chunk is drawn, and the size of the chunk. +// Successive calls to Chunk return the same chunk between calls to Next. +func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { + // There may already be data in chunk if Next has not been called since the previous call to Chunk. + if rb.err == nil && len(rb.chunk) == 0 { + rb.err = rb.loadChunk() + } + return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err +} + +// loadChunk will read from media into chunk, up to the capacity of chunk. +func (rb *ResumableBuffer) loadChunk() error { + bufSize := cap(rb.chunk) + rb.chunk = rb.chunk[:bufSize] + + read := 0 + var err error + for err == nil && read < bufSize { + var n int + n, err = rb.media.Read(rb.chunk[read:]) + read += n + } + rb.chunk = rb.chunk[:read] + return err +} + +// Next advances to the next chunk, which will be returned by the next call to Chunk. +// Calls to Next without a corresponding prior call to Chunk will have no effect. +func (rb *ResumableBuffer) Next() { + rb.off += int64(len(rb.chunk)) + rb.chunk = rb.chunk[0:0] +} + +type readerTyper struct { + io.Reader + googleapi.ContentTyper +} + +// ReaderAtToReader adapts a ReaderAt to be used as a Reader. +// If ra implements googleapi.ContentTyper, then the returned reader +// will also implement googleapi.ContentTyper, delegating to ra. +func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { + r := io.NewSectionReader(ra, 0, size) + if typer, ok := ra.(googleapi.ContentTyper); ok { + return readerTyper{r, typer} + } + return r +} diff --git a/vendor/google.golang.org/api/gensupport/doc.go b/vendor/google.golang.org/api/gensupport/doc.go new file mode 100644 index 00000000..752c4b41 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gensupport is an internal implementation detail used by code +// generated by the google-api-go-generator tool. +// +// This package may be modified at any time without regard for backwards +// compatibility. 
It should not be used directly by API users. +package gensupport diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/gensupport/json.go new file mode 100644 index 00000000..dd7bcd2e --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/json.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +// MarshalJSON returns a JSON encoding of schema containing only selected fields. +// A field is selected if: +// * it has a non-empty value, or +// * its field name is present in forceSendFields, and +// * it is not a nil pointer or nil interface. +// The JSON key for each selected field is taken from the field's json: struct tag. +func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { + if len(forceSendFields) == 0 { + return json.Marshal(schema) + } + + mustInclude := make(map[string]struct{}) + for _, f := range forceSendFields { + mustInclude[f] = struct{}{} + } + + dataMap, err := schemaToMap(schema, mustInclude) + if err != nil { + return nil, err + } + return json.Marshal(dataMap) +} + +func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { + m := make(map[string]interface{}) + s := reflect.ValueOf(schema) + st := s.Type() + + for i := 0; i < s.NumField(); i++ { + jsonTag := st.Field(i).Tag.Get("json") + if jsonTag == "" { + continue + } + tag, err := parseJSONTag(jsonTag) + if err != nil { + return nil, err + } + if tag.ignore { + continue + } + + v := s.Field(i) + f := st.Field(i) + if !includeField(v, f, mustInclude) { + continue + } + + // nil maps are treated as empty maps. + if f.Type.Kind() == reflect.Map && v.IsNil() { + m[tag.apiName] = map[string]string{} + continue + } + + // nil slices are treated as empty slices. + if f.Type.Kind() == reflect.Slice && v.IsNil() { + m[tag.apiName] = []bool{} + continue + } + + if tag.stringFormat { + m[tag.apiName] = formatAsString(v, f.Type.Kind()) + } else { + m[tag.apiName] = v.Interface() + } + } + return m, nil +} + +// formatAsString returns a string representation of v, dereferencing it first if possible. +func formatAsString(v reflect.Value, kind reflect.Kind) string { + if kind == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + + return fmt.Sprintf("%v", v.Interface()) +} + +// jsonTag represents a restricted version of the struct tag format used by encoding/json. +// It is used to describe the JSON encoding of fields in a Schema struct. +type jsonTag struct { + apiName string + stringFormat bool + ignore bool +} + +// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. +// The format of the tag must match that generated by the Schema.writeSchemaStruct method +// in the api generator. +func parseJSONTag(val string) (jsonTag, error) { + if val == "-" { + return jsonTag{ignore: true}, nil + } + + var tag jsonTag + + i := strings.Index(val, ",") + if i == -1 || val[:i] == "" { + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + tag = jsonTag{ + apiName: val[:i], + } + + switch val[i+1:] { + case "omitempty": + case "omitempty,string": + tag.stringFormat = true + default: + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + return tag, nil +} + +// Reports whether the struct field "f" with value "v" should be included in JSON output. 
+func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { + // The regular JSON encoding of a nil pointer is "null", which means "delete this field". + // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. + // However, many fields are not pointers, so there would be no way to delete these fields. + // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. + // Deletion will be handled by a separate mechanism. + if f.Type.Kind() == reflect.Ptr && v.IsNil() { + return false + } + + // The "any" type is represented as an interface{}. If this interface + // is nil, there is no reasonable representation to send. We ignore + // these fields, for the same reasons as given above for pointers. + if f.Type.Kind() == reflect.Interface && v.IsNil() { + return false + } + + _, ok := mustInclude[f.Name] + return ok || !isEmptyValue(v) +} + +// isEmptyValue reports whether v is the empty value for its type. This +// implementation is based on that of the encoding/json package, but its +// correctness does not depend on it being identical. What's important is that +// this function return false in situations where v should not be sent as part +// of a PATCH operation. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go new file mode 100644 index 00000000..817f46f5 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/media.go @@ -0,0 +1,200 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/textproto" + + "google.golang.org/api/googleapi" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. + + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. 
+ return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// If the content type is already known, it can be specified via ctype. +// Otherwise, the content of media will be sniffed to determine the content type. +// If media implements googleapi.ContentTyper (deprecated), this will be used +// instead of sniffing the content. +// After calling DetectContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { + // Note: callers could avoid calling DetectContentType if ctype != "", + // but doing the check inside this function reduces the amount of + // generated code. + if ctype != "" { + return media, ctype + } + + // For backwards compatability, allow clients to set content + // type by providing a ContentTyper for media. + if typer, ok := media.(googleapi.ContentTyper); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} + +type typeReader struct { + io.Reader + typ string +} + +// multipartReader combines the contents of multiple readers to creat a multipart/related HTTP body. +// Close must be called if reads from the multipartReader are abandoned before reaching EOF. +type multipartReader struct { + pr *io.PipeReader + pipeOpen bool + ctype string +} + +func newMultipartReader(parts []typeReader) *multipartReader { + mp := &multipartReader{pipeOpen: true} + var pw *io.PipeWriter + mp.pr, pw = io.Pipe() + mpw := multipart.NewWriter(pw) + mp.ctype = "multipart/related; boundary=" + mpw.Boundary() + go func() { + for _, part := range parts { + w, err := mpw.CreatePart(typeHeader(part.typ)) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err)) + return + } + _, err = io.Copy(w, part.Reader) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err)) + return + } + } + + mpw.Close() + pw.Close() + }() + return mp +} + +func (mp *multipartReader) Read(data []byte) (n int, err error) { + return mp.pr.Read(data) +} + +func (mp *multipartReader) Close() error { + if !mp.pipeOpen { + return nil + } + mp.pipeOpen = false + return mp.pr.Close() +} + +// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body. +// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary. +// +// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. 
+func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { + mp := newMultipartReader([]typeReader{ + {body, bodyContentType}, + {media, mediaContentType}, + }) + return mp, mp.ctype +} + +func typeHeader(contentType string) textproto.MIMEHeader { + h := make(textproto.MIMEHeader) + if contentType != "" { + h.Set("Content-Type", contentType) + } + return h +} + +// PrepareUpload determines whether the data in the supplied reader should be +// uploaded in a single request, or in sequential chunks. +// chunkSize is the size of the chunk that media should be split into. +// If chunkSize is non-zero and the contents of media do not fit in a single +// chunk (or there is an error reading media), then media will be returned as a +// ResumableBuffer. Otherwise, media will be returned as a Reader. +// +// After PrepareUpload has been called, media should no longer be used: the +// media content should be accessed via one of the return values. +func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, + *ResumableBuffer) { + if chunkSize == 0 { // do not chunk + return media, nil + } + + rb := NewResumableBuffer(media, chunkSize) + rdr, _, _, err := rb.Chunk() + + if err == io.EOF { // we can upload this in a single request + return rdr, nil + } + // err might be a non-EOF error. If it is, the next call to rb.Chunk will + // return the same error. Returning a ResumableBuffer ensures that this error + // will be handled at some point. + + return nil, rb +} diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/gensupport/params.go new file mode 100644 index 00000000..3b3c7439 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/params.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "net/url" + + "google.golang.org/api/googleapi" +) + +// URLParams is a simplified replacement for url.Values +// that safely builds up URL parameters for encoding. +type URLParams map[string][]string + +// Get returns the first value for the given key, or "". +func (u URLParams) Get(key string) string { + vs := u[key] + if len(vs) == 0 { + return "" + } + return vs[0] +} + +// Set sets the key to value. +// It replaces any existing values. +func (u URLParams) Set(key, value string) { + u[key] = []string{value} +} + +// SetMulti sets the key to an array of values. +// It replaces any existing values. +// Note that values must not be modified after calling SetMulti +// so the caller is responsible for making a copy if necessary. +func (u URLParams) SetMulti(key string, values []string) { + u[key] = values +} + +// Encode encodes the values into ``URL encoded'' form +// ("bar=baz&foo=quux") sorted by key. +func (u URLParams) Encode() string { + return url.Values(u).Encode() +} + +func SetOptions(u URLParams, opts ...googleapi.CallOption) { + for _, o := range opts { + u.Set(o.Get()) + } +} diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go new file mode 100644 index 00000000..b3e774aa --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/resumable.go @@ -0,0 +1,198 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gensupport + +import ( + "fmt" + "io" + "net/http" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // statusResumeIncomplete is the code returned by the Google uploader + // when the transfer is not yet complete. + statusResumeIncomplete = 308 + + // statusTooManyRequests is returned by the storage API if the + // per-project limits have been temporarily exceeded. The request + // should be retried. + // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes + statusTooManyRequests = 429 +) + +// ResumableUpload is used by the generated APIs to provide resumable uploads. +// It is not used by developers directly. +type ResumableUpload struct { + Client *http.Client + // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". + URI string + UserAgent string // User-Agent for header of the request + // Media is the object being uploaded. + Media *ResumableBuffer + // MediaType defines the media type, e.g. "image/jpeg". + MediaType string + + mu sync.Mutex // guards progress + progress int64 // number of bytes uploaded so far + + // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. + Callback func(int64) + + // If not specified, a default exponential backoff strategy will be used. + Backoff BackoffStrategy +} + +// Progress returns the number of bytes uploaded at this point. +func (rx *ResumableUpload) Progress() int64 { + rx.mu.Lock() + defer rx.mu.Unlock() + return rx.progress +} + +// doUploadRequest performs a single HTTP request to upload data. +// off specifies the offset in rx.Media from which data is drawn. +// size is the number of bytes in data. +// final specifies whether data is the final chunk to be uploaded. +func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) { + req, err := http.NewRequest("POST", rx.URI, data) + if err != nil { + return nil, err + } + + req.ContentLength = size + var contentRange string + if final { + if size == 0 { + contentRange = fmt.Sprintf("bytes */%v", off) + } else { + contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size) + } + } else { + contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1) + } + req.Header.Set("Content-Range", contentRange) + req.Header.Set("Content-Type", rx.MediaType) + req.Header.Set("User-Agent", rx.UserAgent) + return ctxhttp.Do(ctx, rx.Client, req) + +} + +// reportProgress calls a user-supplied callback to report upload progress. +// If old==updated, the callback is not called. +func (rx *ResumableUpload) reportProgress(old, updated int64) { + if updated-old == 0 { + return + } + rx.mu.Lock() + rx.progress = updated + rx.mu.Unlock() + if rx.Callback != nil { + rx.Callback(updated) + } +} + +// transferChunk performs a single HTTP request to upload a single chunk from rx.Media. 
+func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) { + chunk, off, size, err := rx.Media.Chunk() + + done := err == io.EOF + if !done && err != nil { + return nil, err + } + + res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done) + if err != nil { + return res, err + } + + if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK { + rx.reportProgress(off, off+int64(size)) + } + + if res.StatusCode == statusResumeIncomplete { + rx.Media.Next() + } + return res, nil +} + +func contextDone(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// Upload starts the process of a resumable upload with a cancellable context. +// It retries using the provided back off strategy until cancelled or the +// strategy indicates to stop retrying. +// It is called from the auto-generated API code and is not visible to the user. +// rx is private to the auto-generated API code. +// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. +func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { + var pause time.Duration + backoff := rx.Backoff + if backoff == nil { + backoff = DefaultBackoffStrategy() + } + + for { + // Ensure that we return in the case of cancelled context, even if pause is 0. + if contextDone(ctx) { + return nil, ctx.Err() + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(pause): + } + + resp, err = rx.transferChunk(ctx) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we should retry the request. + if shouldRetry(status, err) { + var retry bool + pause, retry = backoff.Pause() + if retry { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + continue + } + } + + // If the chunk was uploaded successfully, but there's still + // more to go, upload the next chunk without any delay. + if status == statusResumeIncomplete { + pause = 0 + backoff.Reset() + resp.Body.Close() + continue + } + + // It's possible for err and resp to both be non-nil here, but we expose a simpler + // contract to our callers: exactly one of resp and err will be non-nil. This means + // that any response body must be closed here before returning a non-nil error. + if err != nil { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return nil, err + } + + return resp, nil + } +} diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go new file mode 100644 index 00000000..7f83d1da --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/retry.go @@ -0,0 +1,77 @@ +package gensupport + +import ( + "io" + "net" + "net/http" + "time" + + "golang.org/x/net/context" +) + +// Retry invokes the given function, retrying it multiple times if the connection failed or +// the HTTP status response indicates the request should be attempted again. ctx may be nil. +func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) { + for { + resp, err := f() + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Return if we shouldn't retry. + pause, retry := backoff.Pause() + if !shouldRetry(status, err) || !retry { + return resp, err + } + + // Ensure the response body is closed, if any. + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + + // Pause, but still listen to ctx.Done if context is not nil. 
+ var done <-chan struct{} + if ctx != nil { + done = ctx.Done() + } + select { + case <-done: + return nil, ctx.Err() + case <-time.After(pause): + } + } +} + +// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests. +func DefaultBackoffStrategy() BackoffStrategy { + return &ExponentialBackoff{ + Base: 250 * time.Millisecond, + Max: 16 * time.Second, + } +} + +// shouldRetry returns true if the HTTP response / error indicates that the +// request should be attempted again. +func shouldRetry(status int, err error) bool { + // Retry for 5xx response codes. + if 500 <= status && status < 600 { + return true + } + + // Retry on statusTooManyRequests{ + if status == statusTooManyRequests { + return true + } + + // Retry on unexpected EOFs and temporary network errors. + if err == io.ErrUnexpectedEOF { + return true + } + if err, ok := err.(net.Error); ok { + return err.Temporary() + } + + return false +} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go new file mode 100644 index 00000000..858537e0 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -0,0 +1,432 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package googleapi contains the common code shared by all Google API +// libraries. +package googleapi + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "google.golang.org/api/googleapi/internal/uritemplates" +) + +// ContentTyper is an interface for Readers which know (or would like +// to override) their Content-Type. If a media body doesn't implement +// ContentTyper, the type is sniffed from the content using +// http.DetectContentType. +type ContentTyper interface { + ContentType() string +} + +// A SizeReaderAt is a ReaderAt with a Size method. +// An io.SectionReader implements SizeReaderAt. +type SizeReaderAt interface { + io.ReaderAt + Size() int64 +} + +// ServerResponse is embedded in each Do response and +// provides the HTTP status code and header sent by the server. +type ServerResponse struct { + // HTTPStatusCode is the server's response status code. + // When using a resource method's Do call, this will always be in the 2xx range. + HTTPStatusCode int + // Header contains the response header fields from the server. + Header http.Header +} + +const ( + Version = "0.5" + + // UserAgent is the header string used to identify this package. + UserAgent = "google-api-go-client/" + Version + + // The default chunk size to use for resumable uplods if not specified by the user. + DefaultUploadChunkSize = 8 * 1024 * 1024 + + // The minimum chunk size that can be used for resumable uploads. All + // user-specified chunk sizes must be multiple of this value. + MinUploadChunkSize = 256 * 1024 +) + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code and will always be populated. + Code int `json:"code"` + // Message is the server response message and is only populated when + // explicitly referenced by the JSON server response. + Message string `json:"message"` + // Body is the raw response returned by the server. + // It is often but not always JSON, depending on how the request fails. + Body string + // Header contains the response header fields from the server. 
+ Header http.Header + + Errors []ErrorItem +} + +// ErrorItem is a detailed error code & message from the Google API frontend. +type ErrorItem struct { + // Reason is the typed error code. For example: "some_example". + Reason string `json:"reason"` + // Message is the human-readable description of the error. + Message string `json:"message"` +} + +func (e *Error) Error() string { + if len(e.Errors) == 0 && e.Message == "" { + return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body) + } + var buf bytes.Buffer + fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code) + if e.Message != "" { + fmt.Fprintf(&buf, "%s", e.Message) + } + if len(e.Errors) == 0 { + return strings.TrimSpace(buf.String()) + } + if len(e.Errors) == 1 && e.Errors[0].Message == e.Message { + fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason) + return buf.String() + } + fmt.Fprintln(&buf, "\nMore details:") + for _, v := range e.Errors { + fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message) + } + return buf.String() +} + +type errorReply struct { + Error *Error `json:"error"` +} + +// CheckResponse returns an error (of type *Error) if the response +// status code is not 2xx. +func CheckResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + slurp, err := ioutil.ReadAll(res.Body) + if err == nil { + jerr := new(errorReply) + err = json.Unmarshal(slurp, jerr) + if err == nil && jerr.Error != nil { + if jerr.Error.Code == 0 { + jerr.Error.Code = res.StatusCode + } + jerr.Error.Body = string(slurp) + return jerr.Error + } + } + return &Error{ + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, + } +} + +// IsNotModified reports whether err is the result of the +// server replying with http.StatusNotModified. +// Such error values are sometimes returned by "Do" methods +// on calls when If-None-Match is used. +func IsNotModified(err error) bool { + if err == nil { + return false + } + ae, ok := err.(*Error) + return ok && ae.Code == http.StatusNotModified +} + +// CheckMediaResponse returns an error (of type *Error) if the response +// status code is not 2xx. Unlike CheckResponse it does not assume the +// body is a JSON error document. +func CheckMediaResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) + res.Body.Close() + return &Error{ + Code: res.StatusCode, + Body: string(slurp), + } +} + +type MarshalStyle bool + +var WithDataWrapper = MarshalStyle(true) +var WithoutDataWrapper = MarshalStyle(false) + +func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { + buf := new(bytes.Buffer) + if wrap { + buf.Write([]byte(`{"data": `)) + } + err := json.NewEncoder(buf).Encode(v) + if err != nil { + return nil, err + } + if wrap { + buf.Write([]byte(`}`)) + } + return buf, nil +} + +// endingWithErrorReader from r until it returns an error. If the +// final error from r is io.EOF and e is non-nil, e is used instead. +type endingWithErrorReader struct { + r io.Reader + e error +} + +func (er endingWithErrorReader) Read(p []byte) (n int, err error) { + n, err = er.r.Read(p) + if err == io.EOF && er.e != nil { + err = er.e + } + return +} + +// countingWriter counts the number of bytes it receives to write, but +// discards them. 
+type countingWriter struct { + n *int64 +} + +func (w countingWriter) Write(p []byte) (int, error) { + *w.n += int64(len(p)) + return len(p), nil +} + +// ProgressUpdater is a function that is called upon every progress update of a resumable upload. +// This is the only part of a resumable upload (from googleapi) that is usable by the developer. +// The remaining usable pieces of resumable uploads is exposed in each auto-generated API. +type ProgressUpdater func(current, total int64) + +type MediaOption interface { + setOptions(o *MediaOptions) +} + +type contentTypeOption string + +func (ct contentTypeOption) setOptions(o *MediaOptions) { + o.ContentType = string(ct) + if o.ContentType == "" { + o.ForceEmptyContentType = true + } +} + +// ContentType returns a MediaOption which sets the Content-Type header for media uploads. +// If ctype is empty, the Content-Type header will be omitted. +func ContentType(ctype string) MediaOption { + return contentTypeOption(ctype) +} + +type chunkSizeOption int + +func (cs chunkSizeOption) setOptions(o *MediaOptions) { + size := int(cs) + if size%MinUploadChunkSize != 0 { + size += MinUploadChunkSize - (size % MinUploadChunkSize) + } + o.ChunkSize = size +} + +// ChunkSize returns a MediaOption which sets the chunk size for media uploads. +// size will be rounded up to the nearest multiple of 256K. +// Media which contains fewer than size bytes will be uploaded in a single request. +// Media which contains size bytes or more will be uploaded in separate chunks. +// If size is zero, media will be uploaded in a single request. +func ChunkSize(size int) MediaOption { + return chunkSizeOption(size) +} + +// MediaOptions stores options for customizing media upload. It is not used by developers directly. +type MediaOptions struct { + ContentType string + ForceEmptyContentType bool + + ChunkSize int +} + +// ProcessMediaOptions stores options from opts in a MediaOptions. +// It is not used by developers directly. +func ProcessMediaOptions(opts []MediaOption) *MediaOptions { + mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize} + for _, o := range opts { + o.setOptions(mo) + } + return mo +} + +func ResolveRelative(basestr, relstr string) string { + u, _ := url.Parse(basestr) + rel, _ := url.Parse(relstr) + u = u.ResolveReference(rel) + us := u.String() + us = strings.Replace(us, "%7B", "{", -1) + us = strings.Replace(us, "%7D", "}", -1) + return us +} + +// has4860Fix is whether this Go environment contains the fix for +// http://golang.org/issue/4860 +var has4860Fix bool + +// init initializes has4860Fix by checking the behavior of the net/http package. +func init() { + r := http.Request{ + URL: &url.URL{ + Scheme: "http", + Opaque: "//opaque", + }, + } + b := &bytes.Buffer{} + r.Write(b) + has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http")) +} + +// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it +// don't alter any hex-escaped characters in u.Path. +func SetOpaque(u *url.URL) { + u.Opaque = "//" + u.Host + u.Path + if !has4860Fix { + u.Opaque = u.Scheme + ":" + u.Opaque + } +} + +// Expand subsitutes any {encoded} strings in the URL passed in using +// the map supplied. +// +// This calls SetOpaque to avoid encoding of the parameters in the URL path. +func Expand(u *url.URL, expansions map[string]string) { + expanded, err := uritemplates.Expand(u.Path, expansions) + if err == nil { + u.Path = expanded + SetOpaque(u) + } +} + +// CloseBody is used to close res.Body. 
+// Prior to calling Close, it also tries to Read a small amount to see an EOF. +// Not seeing an EOF can prevent HTTP Transports from reusing connections. +func CloseBody(res *http.Response) { + if res == nil || res.Body == nil { + return + } + // Justification for 3 byte reads: two for up to "\r\n" after + // a JSON/XML document, and then 1 to see EOF if we haven't yet. + // TODO(bradfitz): detect Go 1.3+ and skip these reads. + // See https://codereview.appspot.com/58240043 + // and https://codereview.appspot.com/49570044 + buf := make([]byte, 1) + for i := 0; i < 3; i++ { + _, err := res.Body.Read(buf) + if err != nil { + break + } + } + res.Body.Close() + +} + +// VariantType returns the type name of the given variant. +// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. +// This is used to support "variant" APIs that can return one of a number of different types. +func VariantType(t map[string]interface{}) string { + s, _ := t["type"].(string) + return s +} + +// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. +// This is used to support "variant" APIs that can return one of a number of different types. +// It reports whether the conversion was successful. +func ConvertVariant(v map[string]interface{}, dst interface{}) bool { + var buf bytes.Buffer + err := json.NewEncoder(&buf).Encode(v) + if err != nil { + return false + } + return json.Unmarshal(buf.Bytes(), dst) == nil +} + +// A Field names a field to be retrieved with a partial response. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// +// Partial responses can dramatically reduce the amount of data that must be sent to your application. +// In order to request partial responses, you can specify the full list of fields +// that your application needs by adding the Fields option to your request. +// +// Field strings use camelCase with leading lower-case characters to identify fields within the response. +// +// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, +// you could request just those fields like this: +// +// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// +// or if you were also interested in each Item's "Updated" field, you can combine them like this: +// +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// +// More information about field formatting can be found here: +// https://developers.google.com/+/api/#fields-syntax +// +// Another way to find field names is through the Google API explorer: +// https://developers.google.com/apis-explorer/#p/ +type Field string + +// CombineFields combines fields into a single string. +func CombineFields(s []Field) string { + r := make([]string, len(s)) + for i, v := range s { + r[i] = string(v) + } + return strings.Join(r, ",") +} + +// A CallOption is an optional argument to an API call. +// It should be treated as an opaque value by users of Google APIs. +// +// A CallOption is something that configures an API call in a way that is +// not specific to that API; for instance, controlling the quota user for +// an API call is common across many APIs, and is thus a CallOption. +type CallOption interface { + Get() (key, value string) +} + +// QuotaUser returns a CallOption that will set the quota user for a call. +// The quota user can be used by server-side applications to control accounting. 
+// It can be an arbitrary string up to 40 characters, and will override UserIP +// if both are provided. +func QuotaUser(u string) CallOption { return quotaUser(u) } + +type quotaUser string + +func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } + +// UserIP returns a CallOption that will set the "userIp" parameter of a call. +// This should be the IP address of the originating request. +func UserIP(ip string) CallOption { return userIP(ip) } + +type userIP string + +func (i userIP) Get() (string, string) { return "userIp", string(i) } + +// Trace returns a CallOption that enables diagnostic tracing for a call. +// traceToken is an ID supplied by Google support. +func Trace(traceToken string) CallOption { return traceTok(traceToken) } + +type traceTok string + +func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } + +// TODO: Fields too diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE new file mode 100644 index 00000000..de9c88cb --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go new file mode 100644 index 00000000..7c103ba1 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go @@ -0,0 +1,220 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 3 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// uritemplates does not support composite values (in Go: slices or maps) +// and so does not qualify as a level 4 implementation. 
+package uritemplates + +import ( + "bytes" + "errors" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) string { + if allowReserved { + return string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) +} + +// A uriTemplate is a parsed representation of a URI template. +type uriTemplate struct { + raw string + parts []templatePart +} + +// parse parses a URI template string into a uriTemplate object. +func parse(rawTemplate string) (*uriTemplate, error) { + split := strings.Split(rawTemplate, "{") + parts := make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + return nil, errors.New("unexpected }") + } + parts[i].raw = s + continue + } + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + return nil, errors.New("malformed template") + } + expression := subsplit[0] + var err error + parts[i*2-1], err = parseExpression(expression) + if err != nil { + return nil, err + } + parts[i*2].raw = subsplit[1] + } + return &uriTemplate{ + raw: rawTemplate, + parts: parts, + }, nil +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + // TODO(djd): Remove "*" suffix parsing once we check that no APIs have + // mistakenly used that attribute. 
+ if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string. +func (t *uriTemplate) Expand(values map[string]string) string { + var buf bytes.Buffer + for _, p := range t.parts { + p.expand(&buf, values) + } + return buf.String() +} + +func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { + if len(tp.raw) > 0 { + buf.WriteString(tp.raw) + return + } + var first = true + for _, term := range tp.terms { + value, exists := values[term.name] + if !exists { + continue + } + if first { + buf.WriteString(tp.first) + first = false + } else { + buf.WriteString(tp.sep) + } + tp.expandString(buf, term, value) + } +} + +func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if tp.named { + buf.WriteString(name) + if empty { + buf.WriteString(tp.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + tp.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, tp.allowReserved)) +} diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go new file mode 100644 index 00000000..eff260a6 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uritemplates + +func Expand(path string, values map[string]string) (string, error) { + template, err := parse(path) + if err != nil { + return "", err + } + return template.Expand(values), nil +} diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go new file mode 100644 index 00000000..a02b4b07 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -0,0 +1,182 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "encoding/json" + "strconv" +) + +// Int64s is a slice of int64s that marshal as quoted strings in JSON. +type Int64s []int64 + +func (q *Int64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, int64(v)) + } + return nil +} + +// Int32s is a slice of int32s that marshal as quoted strings in JSON. 
+type Int32s []int32 + +func (q *Int32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, int32(v)) + } + return nil +} + +// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. +type Uint64s []uint64 + +func (q *Uint64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, uint64(v)) + } + return nil +} + +// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. +type Uint32s []uint32 + +func (q *Uint32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, uint32(v)) + } + return nil +} + +// Float64s is a slice of float64s that marshal as quoted strings in JSON. +type Float64s []float64 + +func (q *Float64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + *q = append(*q, float64(v)) + } + return nil +} + +func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { + dst := make([]byte, 0, 2+n*10) // somewhat arbitrary + dst = append(dst, '[') + for i := 0; i < n; i++ { + if i > 0 { + dst = append(dst, ',') + } + dst = append(dst, '"') + dst = fn(dst, i) + dst = append(dst, '"') + } + dst = append(dst, ']') + return dst, nil +} + +func (s Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, s[i], 10) + }) +} + +func (s Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(s[i]), 10) + }) +} + +func (s Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, s[i], 10) + }) +} + +func (s Uint32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(s[i]), 10) + }) +} + +func (s Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, s[i], 'g', -1, 64) + }) +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. 
+func Uint32(v uint32) *uint32 { return &v } + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { return &v } From 91e036d7b445a92e411cb89306977487e6cb514f Mon Sep 17 00:00:00 2001 From: Paul Zabelin Date: Sun, 1 May 2016 14:48:19 -0700 Subject: [PATCH 171/195] add Homebrew installation instructions --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index d908450c..c2836869 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,12 @@ If you want to compile from source you need the [go toolchain](http://golang.org Version 1.5 or higher. ## Installation +### [Homebrew](http://brew.sh) on Mac +``` +brew tap paulz/gdrive +brew install gdrive +``` +### Other Download `gdrive` from one of the links below. On unix systems run `chmod +x gdrive` after download to make the binary executable. The first time gdrive is launched (i.e. run `gdrive about` in your From e605f0cb100191bab13562221a50a873c97ed352 Mon Sep 17 00:00:00 2001 From: Amigo Developer Date: Mon, 2 May 2016 13:45:59 -0700 Subject: [PATCH 172/195] remove tap from install instructions as now https://github.com/Homebrew/homebrew-core/pull/754 has been accepted we do not to tap --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index c2836869..e3001d26 100644 --- a/README.md +++ b/README.md @@ -11,9 +11,8 @@ If you want to compile from source you need the [go toolchain](http://golang.org Version 1.5 or higher. ## Installation -### [Homebrew](http://brew.sh) on Mac +### With [Homebrew](http://brew.sh) on Mac ``` -brew tap paulz/gdrive brew install gdrive ``` ### Other From cddaa25f07f2e9e5c7406cf400420fff478aaf54 Mon Sep 17 00:00:00 2001 From: Anmol Singh Jaggi Date: Mon, 20 Jun 2016 01:45:15 +0530 Subject: [PATCH 173/195] Fixed typo `directoy` -> `directory` --- drive/sync_upload.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drive/sync_upload.go b/drive/sync_upload.go index 261497d1..f1e43a49 100644 --- a/drive/sync_upload.go +++ b/drive/sync_upload.go @@ -120,7 +120,7 @@ func (self *Drive) prepareSyncRoot(args UploadSyncArgs) (*drive.File, error) { // Ensure that the directory is empty if !isEmpty { - return nil, fmt.Errorf("Root directoy is not empty, the initial sync requires an empty directory") + return nil, fmt.Errorf("Root directory is not empty, the initial sync requires an empty directory") } // Update directory with syncRoot property From 5406c618d2b271959438908845b49b4d553c6bc6 Mon Sep 17 00:00:00 2001 From: Jos van den Oever Date: Wed, 24 Aug 2016 23:14:53 +0200 Subject: [PATCH 174/195] Add --mime option to gdrive import --- drive/import.go | 8 ++++++-- gdrive.go | 5 +++++ handlers_drive.go | 1 + 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/drive/import.go b/drive/import.go index 2ee5f1e1..0162aa34 100644 --- a/drive/import.go +++ b/drive/import.go @@ -11,15 +11,19 @@ import ( type ImportArgs struct { Out io.Writer + Mime string Progress io.Writer Path string Parents []string } func (self *Drive) Import(args ImportArgs) error { - fromMime := getMimeType(args.Path) + fromMime := args.Mime if fromMime == "" { - return fmt.Errorf("Could not determine mime type of file") + fromMime = getMimeType(args.Path) + } + if fromMime == "" { + return fmt.Errorf("Could 
not determine mime type of file, use --mime") } about, err := self.service.About.Get().Fields("importFormats").Do() diff --git a/gdrive.go b/gdrive.go index ecb69503..90091927 100644 --- a/gdrive.go +++ b/gdrive.go @@ -719,6 +719,11 @@ func main() { Description: "Hide progress", OmitValue: true, }, + cli.StringFlag{ + Name: "mime", + Patterns: []string{"--mime"}, + Description: "Mime type of imported file", + }, ), }, }, diff --git a/handlers_drive.go b/handlers_drive.go index 8db7329b..c89b8c29 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -190,6 +190,7 @@ func infoHandler(ctx cli.Context) { func importHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Import(drive.ImportArgs{ + Mime: args.String("mime"), Out: os.Stdout, Path: args.String("path"), Parents: args.StringSlice("parent"), From 41ce2f5b472cb4caf14b8814a43b22eb6d2da9e4 Mon Sep 17 00:00:00 2001 From: Fabio Di Fabio Date: Thu, 1 Sep 2016 16:07:28 +0200 Subject: [PATCH 175/195] add the parameter to specify the Google Apps domain when the sharing type is domain --- drive/share.go | 2 ++ gdrive.go | 5 +++++ handlers_drive.go | 1 + 3 files changed, 8 insertions(+) diff --git a/drive/share.go b/drive/share.go index 69b9c7d8..e942c170 100644 --- a/drive/share.go +++ b/drive/share.go @@ -13,6 +13,7 @@ type ShareArgs struct { Role string Type string Email string + Domain string Discoverable bool } @@ -22,6 +23,7 @@ func (self *Drive) Share(args ShareArgs) error { Role: args.Role, Type: args.Type, EmailAddress: args.Email, + Domain: args.Domain, } _, err := self.service.Permissions.Create(args.FileId, permission).Do() diff --git a/gdrive.go b/gdrive.go index ecb69503..b6078278 100644 --- a/gdrive.go +++ b/gdrive.go @@ -380,6 +380,11 @@ func main() { Patterns: []string{"--email"}, Description: "The email address of the user or group to share the file with. Requires 'user' or 'group' as type", }, + cli.StringFlag{ + Name: "domain", + Patterns: []string{"--domain"}, + Description: "The name of Google Apps domain. 
Requires 'domain' as type", + }, cli.BoolFlag{ Name: "discoverable", Patterns: []string{"--discoverable"}, diff --git a/handlers_drive.go b/handlers_drive.go index 8db7329b..07c12d47 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -240,6 +240,7 @@ func shareHandler(ctx cli.Context) { Role: args.String("role"), Type: args.String("type"), Email: args.String("email"), + Domain: args.String("domain"), Discoverable: args.Bool("discoverable"), }) checkErr(err) From 025f6fc019eb42a51c07ade00a966d9bc10b3874 Mon Sep 17 00:00:00 2001 From: Roberto Gambuzzi Date: Tue, 6 Sep 2016 14:59:19 +0100 Subject: [PATCH 176/195] Add the skip parameter to download and download query commands --- drive/download.go | 22 +++++++++++++++++----- gdrive.go | 15 ++++++++++++++- handlers_drive.go | 9 ++++++--- 3 files changed, 37 insertions(+), 9 deletions(-) diff --git a/drive/download.go b/drive/download.go index ec1af8a3..e4a41412 100644 --- a/drive/download.go +++ b/drive/download.go @@ -2,12 +2,13 @@ package drive import ( "fmt" - "google.golang.org/api/drive/v3" - "google.golang.org/api/googleapi" "io" "os" "path/filepath" "time" + + "google.golang.org/api/drive/v3" + "google.golang.org/api/googleapi" ) type DownloadArgs struct { @@ -16,6 +17,7 @@ type DownloadArgs struct { Id string Path string Force bool + Skip bool Recursive bool Delete bool Stdout bool @@ -68,6 +70,7 @@ type DownloadQueryArgs struct { Query string Path string Force bool + Skip bool Recursive bool } @@ -86,6 +89,7 @@ func (self *Drive) DownloadQuery(args DownloadQueryArgs) error { Progress: args.Progress, Path: args.Path, Force: args.Force, + Skip: args.Skip, } for _, f := range files { @@ -147,6 +151,7 @@ func (self *Drive) downloadBinary(f *drive.File, args DownloadArgs) (int64, int6 contentLength: res.ContentLength, fpath: fpath, force: args.Force, + skip: args.Skip, stdout: args.Stdout, progress: args.Progress, }) @@ -158,6 +163,7 @@ type saveFileArgs struct { contentLength int64 fpath string force bool + skip bool stdout bool progress io.Writer } @@ -172,9 +178,15 @@ func (self *Drive) saveFile(args saveFileArgs) (int64, int64, error) { return 0, 0, err } - // Check if file exists - if !args.force && fileExists(args.fpath) { - return 0, 0, fmt.Errorf("File '%s' already exists, use --force to overwrite", args.fpath) + // Check if file exists to force + if !args.skip && !args.force && fileExists(args.fpath) { + return 0, 0, fmt.Errorf("File '%s' already exists, use --force to overwrite or --skip to skip", args.fpath) + } + + //Check if file exists to skip + if args.skip && fileExists(args.fpath) { + fmt.Printf("File '%s' already exists, skipping\n", args.fpath) + return 0, 0, nil } // Ensure any parent directories exists diff --git a/gdrive.go b/gdrive.go index ecb69503..ef52f30f 100644 --- a/gdrive.go +++ b/gdrive.go @@ -2,8 +2,9 @@ package main import ( "fmt" - "github.com/prasmussen/gdrive/cli" "os" + + "github.com/prasmussen/gdrive/cli" ) const Name = "gdrive" @@ -106,6 +107,12 @@ func main() { Description: "Overwrite existing file", OmitValue: true, }, + cli.BoolFlag{ + Name: "skip", + Patterns: []string{"-s", "--skip"}, + Description: "Skip existing files", + OmitValue: true, + }, cli.BoolFlag{ Name: "recursive", Patterns: []string{"-r", "--recursive"}, @@ -157,6 +164,12 @@ func main() { Description: "Overwrite existing file", OmitValue: true, }, + cli.BoolFlag{ + Name: "skip", + Patterns: []string{"-s", "--skip"}, + Description: "Skip existing files", + OmitValue: true, + }, cli.BoolFlag{ Name: "recursive", Patterns: 
[]string{"-r", "--recursive"}, diff --git a/handlers_drive.go b/handlers_drive.go index 8db7329b..415224d4 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -2,15 +2,16 @@ package main import ( "fmt" - "github.com/prasmussen/gdrive/auth" - "github.com/prasmussen/gdrive/cli" - "github.com/prasmussen/gdrive/drive" "io" "io/ioutil" "net/http" "os" "path/filepath" "time" + + "github.com/prasmussen/gdrive/auth" + "github.com/prasmussen/gdrive/cli" + "github.com/prasmussen/gdrive/drive" ) const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com" @@ -53,6 +54,7 @@ func downloadHandler(ctx cli.Context) { Out: os.Stdout, Id: args.String("fileId"), Force: args.Bool("force"), + Skip: args.Bool("skip"), Path: args.String("path"), Delete: args.Bool("delete"), Recursive: args.Bool("recursive"), @@ -69,6 +71,7 @@ func downloadQueryHandler(ctx cli.Context) { Out: os.Stdout, Query: args.String("query"), Force: args.Bool("force"), + Skip: args.Bool("skip"), Recursive: args.Bool("recursive"), Path: args.String("path"), Progress: progressWriter(args.Bool("noProgress")), From 105055af9158bd5657907bedf4fb1d6bc477657e Mon Sep 17 00:00:00 2001 From: Alexander Kjeldaas Date: Mon, 12 Sep 2016 14:36:18 +0200 Subject: [PATCH 177/195] Fix #159 Document how to list files in a subdirectory. --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index e3001d26..9a1428ef 100644 --- a/README.md +++ b/README.md @@ -143,6 +143,13 @@ options: --bytes Size in bytes ``` +List file in subdirectory + + +``` +./gdrive list --query " 'IdOfTheParentFolder' in parents" +``` + #### Download file or directory ``` gdrive [global] download [options] From 8f5194c7154b20965250f6333226a18e225647dd Mon Sep 17 00:00:00 2001 From: Javier Blazquez Date: Tue, 20 Sep 2016 16:03:22 -0700 Subject: [PATCH 178/195] Added --description option --- README.md | 48 ++++++++++++++++++--------------- drive/mkdir.go | 13 ++++++--- drive/update.go | 23 ++++++++-------- drive/upload.go | 54 ++++++++++++++++++++----------------- gdrive.go | 22 ++++++++++++++- handlers_drive.go | 68 +++++++++++++++++++++++++---------------------- 6 files changed, 133 insertions(+), 95 deletions(-) diff --git a/README.md b/README.md index 9a1428ef..c031d4d6 100644 --- a/README.md +++ b/README.md @@ -195,15 +195,16 @@ global: --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) options: - -r, --recursive Upload directory recursively - -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents - --name Filename - --no-progress Hide progress - --mime Force mime type - --share Share file - --delete Delete local file when upload is successful - --timeout Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: 300 - --chunksize Set chunk size in bytes, default: 8388608 + -r, --recursive Upload directory recursively + -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents + --name Filename + --description File description + --no-progress Hide progress + --mime Force mime type + --share Share file + --delete Delete local file when upload is successful + --timeout Set timeout in seconds, use 0 for no timeout. 
Timeout is reached when no data is transferred in set amount of seconds, default: 300 + --chunksize Set chunk size in bytes, default: 8388608 ``` #### Upload file from stdin @@ -216,12 +217,13 @@ global: --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) options: - -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents - --chunksize Set chunk size in bytes, default: 8388608 - --mime Force mime type - --share Share file - --timeout Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: 300 - --no-progress Hide progress + -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents + --chunksize Set chunk size in bytes, default: 8388608 + --description File description + --mime Force mime type + --share Share file + --timeout Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: 300 + --no-progress Hide progress ``` #### Update file, this creates a new revision of the file @@ -234,12 +236,13 @@ global: --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) options: - -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents - --name Filename - --no-progress Hide progress - --mime Force mime type - --timeout Set timeout in seconds, use 0 for no timeout. Timeout is reached when no data is transferred in set amount of seconds, default: 300 - --chunksize Set chunk size in bytes, default: 8388608 + -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents + --name Filename + --description File description + --no-progress Hide progress + --mime Force mime type + --timeout Set timeout in seconds, use 0 for no timeout. 
Timeout is reached when no data is transferred in set amount of seconds, default: 300 + --chunksize Set chunk size in bytes, default: 8388608 ``` #### Show file info @@ -265,7 +268,8 @@ global: --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) options: - -p, --parent Parent id of created directory, can be specified multiple times to give many parents + -p, --parent Parent id of created directory, can be specified multiple times to give many parents + --description Directory description ``` #### Share file or directory diff --git a/drive/mkdir.go b/drive/mkdir.go index 8eea210b..05d51914 100644 --- a/drive/mkdir.go +++ b/drive/mkdir.go @@ -9,9 +9,10 @@ import ( const DirectoryMimeType = "application/vnd.google-apps.folder" type MkdirArgs struct { - Out io.Writer - Name string - Parents []string + Out io.Writer + Name string + Description string + Parents []string } func (self *Drive) Mkdir(args MkdirArgs) error { @@ -24,7 +25,11 @@ func (self *Drive) Mkdir(args MkdirArgs) error { } func (self *Drive) mkdir(args MkdirArgs) (*drive.File, error) { - dstFile := &drive.File{Name: args.Name, MimeType: DirectoryMimeType} + dstFile := &drive.File{ + Name: args.Name, + Description: args.Description, + MimeType: DirectoryMimeType, + } // Set parent folders dstFile.Parents = args.Parents diff --git a/drive/update.go b/drive/update.go index 2ab684e9..f496f52b 100644 --- a/drive/update.go +++ b/drive/update.go @@ -11,16 +11,17 @@ import ( ) type UpdateArgs struct { - Out io.Writer - Progress io.Writer - Id string - Path string - Name string - Parents []string - Mime string - Recursive bool - ChunkSize int64 - Timeout time.Duration + Out io.Writer + Progress io.Writer + Id string + Path string + Name string + Description string + Parents []string + Mime string + Recursive bool + ChunkSize int64 + Timeout time.Duration } func (self *Drive) Update(args UpdateArgs) error { @@ -32,7 +33,7 @@ func (self *Drive) Update(args UpdateArgs) error { defer srcFile.Close() // Instantiate empty drive file - dstFile := &drive.File{} + dstFile := &drive.File{Description: args.Description} // Use provided file name or use filename if args.Name == "" { diff --git a/drive/upload.go b/drive/upload.go index dbf068ca..a4482e20 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -12,17 +12,18 @@ import ( ) type UploadArgs struct { - Out io.Writer - Progress io.Writer - Path string - Name string - Parents []string - Mime string - Recursive bool - Share bool - Delete bool - ChunkSize int64 - Timeout time.Duration + Out io.Writer + Progress io.Writer + Path string + Name string + Description string + Parents []string + Mime string + Recursive bool + Share bool + Delete bool + ChunkSize int64 + Timeout time.Duration } func (self *Drive) Upload(args UploadArgs) error { @@ -110,9 +111,10 @@ func (self *Drive) uploadDirectory(args UploadArgs) error { fmt.Fprintf(args.Out, "Creating directory %s\n", srcFileInfo.Name()) // Make directory on drive f, err := self.mkdir(MkdirArgs{ - Out: args.Out, - Name: srcFileInfo.Name(), - Parents: args.Parents, + Out: args.Out, + Name: srcFileInfo.Name(), + Parents: args.Parents, + Description: args.Description, }) if err != nil { return err @@ -129,6 +131,7 @@ func (self *Drive) uploadDirectory(args UploadArgs) error { newArgs := args newArgs.Path = filepath.Join(args.Path, name) newArgs.Parents = []string{f.Id} + newArgs.Description = "" // Upload err = self.uploadRecursive(newArgs) @@ -150,7 +153,7 @@ func (self *Drive) 
uploadFile(args UploadArgs) (*drive.File, int64, error) { defer srcFile.Close() // Instantiate empty drive file - dstFile := &drive.File{} + dstFile := &drive.File{Description: args.Description} // Use provided file name or use filename if args.Name == "" { @@ -196,15 +199,16 @@ func (self *Drive) uploadFile(args UploadArgs) (*drive.File, int64, error) { } type UploadStreamArgs struct { - Out io.Writer - In io.Reader - Name string - Parents []string - Mime string - Share bool - ChunkSize int64 - Progress io.Writer - Timeout time.Duration + Out io.Writer + In io.Reader + Name string + Description string + Parents []string + Mime string + Share bool + ChunkSize int64 + Progress io.Writer + Timeout time.Duration } func (self *Drive) UploadStream(args UploadStreamArgs) error { @@ -213,7 +217,7 @@ func (self *Drive) UploadStream(args UploadStreamArgs) error { } // Instantiate empty drive file - dstFile := &drive.File{Name: args.Name} + dstFile := &drive.File{Name: args.Name, Description: args.Description} // Set mime type if provided if args.Mime != "" { diff --git a/gdrive.go b/gdrive.go index 4833eaa4..971dc4c4 100644 --- a/gdrive.go +++ b/gdrive.go @@ -213,6 +213,11 @@ func main() { Patterns: []string{"--name"}, Description: "Filename", }, + cli.StringFlag{ + Name: "description", + Patterns: []string{"--description"}, + Description: "File description", + }, cli.BoolFlag{ Name: "noProgress", Patterns: []string{"--no-progress"}, @@ -269,6 +274,11 @@ func main() { Description: fmt.Sprintf("Set chunk size in bytes, default: %d", DefaultUploadChunkSize), DefaultValue: DefaultUploadChunkSize, }, + cli.StringFlag{ + Name: "description", + Patterns: []string{"--description"}, + Description: "File description", + }, cli.StringFlag{ Name: "mime", Patterns: []string{"--mime"}, @@ -312,6 +322,11 @@ func main() { Patterns: []string{"--name"}, Description: "Filename", }, + cli.StringFlag{ + Name: "description", + Patterns: []string{"--description"}, + Description: "File description", + }, cli.BoolFlag{ Name: "noProgress", Patterns: []string{"--no-progress"}, @@ -366,6 +381,11 @@ func main() { Patterns: []string{"-p", "--parent"}, Description: "Parent id of created directory, can be specified multiple times to give many parents", }, + cli.StringFlag{ + Name: "description", + Patterns: []string{"--description"}, + Description: "Directory description", + }, ), }, }, @@ -397,7 +417,7 @@ func main() { Name: "domain", Patterns: []string{"--domain"}, Description: "The name of Google Apps domain. 
Requires 'domain' as type", - }, + }, cli.BoolFlag{ Name: "discoverable", Patterns: []string{"--discoverable"}, diff --git a/handlers_drive.go b/handlers_drive.go index 5b651baf..5240566f 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -115,17 +115,18 @@ func uploadHandler(ctx cli.Context) { args := ctx.Args() checkUploadArgs(args) err := newDrive(args).Upload(drive.UploadArgs{ - Out: os.Stdout, - Progress: progressWriter(args.Bool("noProgress")), - Path: args.String("path"), - Name: args.String("name"), - Parents: args.StringSlice("parent"), - Mime: args.String("mime"), - Recursive: args.Bool("recursive"), - Share: args.Bool("share"), - Delete: args.Bool("delete"), - ChunkSize: args.Int64("chunksize"), - Timeout: durationInSeconds(args.Int64("timeout")), + Out: os.Stdout, + Progress: progressWriter(args.Bool("noProgress")), + Path: args.String("path"), + Name: args.String("name"), + Description: args.String("description"), + Parents: args.StringSlice("parent"), + Mime: args.String("mime"), + Recursive: args.Bool("recursive"), + Share: args.Bool("share"), + Delete: args.Bool("delete"), + ChunkSize: args.Int64("chunksize"), + Timeout: durationInSeconds(args.Int64("timeout")), }) checkErr(err) } @@ -133,15 +134,16 @@ func uploadHandler(ctx cli.Context) { func uploadStdinHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).UploadStream(drive.UploadStreamArgs{ - Out: os.Stdout, - In: os.Stdin, - Name: args.String("name"), - Parents: args.StringSlice("parent"), - Mime: args.String("mime"), - Share: args.Bool("share"), - ChunkSize: args.Int64("chunksize"), - Timeout: durationInSeconds(args.Int64("timeout")), - Progress: progressWriter(args.Bool("noProgress")), + Out: os.Stdout, + In: os.Stdin, + Name: args.String("name"), + Description: args.String("description"), + Parents: args.StringSlice("parent"), + Mime: args.String("mime"), + Share: args.Bool("share"), + ChunkSize: args.Int64("chunksize"), + Timeout: durationInSeconds(args.Int64("timeout")), + Progress: progressWriter(args.Bool("noProgress")), }) checkErr(err) } @@ -167,15 +169,16 @@ func uploadSyncHandler(ctx cli.Context) { func updateHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Update(drive.UpdateArgs{ - Out: os.Stdout, - Id: args.String("fileId"), - Path: args.String("path"), - Name: args.String("name"), - Parents: args.StringSlice("parent"), - Mime: args.String("mime"), - Progress: progressWriter(args.Bool("noProgress")), - ChunkSize: args.Int64("chunksize"), - Timeout: durationInSeconds(args.Int64("timeout")), + Out: os.Stdout, + Id: args.String("fileId"), + Path: args.String("path"), + Name: args.String("name"), + Description: args.String("description"), + Parents: args.StringSlice("parent"), + Mime: args.String("mime"), + Progress: progressWriter(args.Bool("noProgress")), + ChunkSize: args.Int64("chunksize"), + Timeout: durationInSeconds(args.Int64("timeout")), }) checkErr(err) } @@ -229,9 +232,10 @@ func listRevisionsHandler(ctx cli.Context) { func mkdirHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).Mkdir(drive.MkdirArgs{ - Out: os.Stdout, - Name: args.String("name"), - Parents: args.StringSlice("parent"), + Out: os.Stdout, + Name: args.String("name"), + Description: args.String("description"), + Parents: args.StringSlice("parent"), }) checkErr(err) } From 9f5145bd2a165501b2ed830b1db68e9f2a045b9a Mon Sep 17 00:00:00 2001 From: dan smith Date: Mon, 24 Oct 2016 22:07:52 -0400 Subject: [PATCH 179/195] clarify that only the root .gdriveignore is used --- README.md | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c031d4d6..871b64be 100644 --- a/README.md +++ b/README.md @@ -88,7 +88,7 @@ To learn more see usage and the examples below. #### .gdriveignore Placing a .gdriveignore in the root of your sync directory can be used to skip certain files from being synced. .gdriveignore follows the same -rules as [.gitignore](https://git-scm.com/docs/gitignore). +rules as [.gitignore](https://git-scm.com/docs/gitignore), except that gdrive only reads the .gdriveignore file in the root of the sync directory, not ones in any subdirectories. ## Usage From 0e05cfb6f84a714f9bdafade6d3cddbb8c8273e9 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 18 Jan 2017 20:40:51 +0100 Subject: [PATCH 180/195] Update windows 64-bit download link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c031d4d6..b2b91c62 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ You will be prompted for a new verification code if the folder does not exist. | [gdrive-linux-ppc64](https://docs.google.com/uc?id=0B3X9GlR6EmbnS09XMzhfRXBnUzA&export=download) | 2.1.0 | Linux PPC 64-bit | 70a1ac5be9ba819da5cf7a8dbd513805a26509ac | | [gdrive-linux-ppc64le](https://docs.google.com/uc?id=0B3X9GlR6EmbneDJ2b3hqbVlNZnc&export=download) | 2.1.0 | Linux PPC 64-bit le | f426817ee4824b83b978f82f8e72eac6db92f2d1 | | [gdrive-windows-386.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbnV3RNeFVUQjZvS2c&export=download) | 2.1.0 | Window 32-bit | 1429200631b598543eddc3df3487117cad95adbb | -| [gdrive-windows-x64.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbnNFRSSW1GaFBSRk0&export=download) | 2.1.0 | Windows 64-bit | 16ccab7c66b144e5806daeb2ba50d567b51504ca | +| [gdrive-windows-x64.exe](https://drive.google.com/uc?id=0B3X9GlR6EmbnbnBsTXlfS1J5UjQ&export=download) | 2.1.0 | Windows 64-bit | 17f692a027a049385af2576503cd376593cc87b7 | | [gdrive-dragonfly-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnelNIdmRMMGpVa2s&export=download) | 2.1.0 | DragonFly BSD 64-bit | dc214a24e59f68d99ca62757d99099051f83804a | | [gdrive-freebsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnQkN0NnUwZ0tKLXM&export=download) | 2.1.0 | FreeBSD 64-bit | 93a5581652f9c01c47fb6c16e8ae655182f265da | | [gdrive-freebsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnNU5rbXBzeEhhOTA&export=download) | 2.1.0 | FreeBSD 32-bit | b9a3ee1e0fdbb5fa970942ab89b354ee863a5758 | From b33b3e96eb6443ae3dff61e0e7b38bb529e328ea Mon Sep 17 00:00:00 2001 From: Fabio Di Fabio Date: Thu, 26 Jan 2017 15:03:51 +0100 Subject: [PATCH 181/195] New feature, support authentication via service account. --- README.md | 71 +++++++++++++++++++++++++++++++++------------ auth/file_source.go | 15 ++++++++-- auth/oauth.go | 18 ++++++++++++ gdrive.go | 5 ++++ handlers_drive.go | 10 +++++++ 5 files changed, 98 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index b2b91c62..500467a0 100644 --- a/README.md +++ b/README.md @@ -85,6 +85,14 @@ syncing many files. Currently only one file is uploaded at the time, the speed can be improved in the future by uploading several files concurrently. To learn more see usage and the examples below. +### Service Account +For server to server communication, where user interaction is not a viable option, +is it possible to use a service account, as described in this [Google document](https://developers.google.com/identity/protocols/OAuth2ServiceAccount). 
+If you want to use a service account, instead of being interactively prompted for +authentication, you need to use the `--service-account ` +global option, where `serviceAccountCredentials` is a file in JSON format obtained +through the Google API Console, and its location is relative to the config dir. + #### .gdriveignore Placing a .gdriveignore in the root of your sync directory can be used to skip certain files from being synced. .gdriveignore follows the same @@ -132,6 +140,7 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) options: -m, --max Max files to list, default: 30 @@ -158,7 +167,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: -f, --force Overwrite existing file -r, --recursive Download directory recursively, documents will be skipped @@ -177,7 +187,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: -f, --force Overwrite existing file -r, --recursive Download directories recursively, documents will be skipped @@ -193,7 +204,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: -r, --recursive Upload directory recursively -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents @@ -215,7 +227,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents --chunksize Set chunk size in bytes, default: 8388608 @@ -234,7 +247,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended 
for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents --name Filename @@ -253,7 +267,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: --bytes Show size in bytes ``` @@ -266,7 +281,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: -p, --parent Parent id of created directory, can be specified multiple times to give many parents --description Directory description @@ -280,7 +296,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: --role Share role: owner/writer/commenter/reader, default: reader --type Share type: user/group/domain/anyone, default: anyone @@ -297,6 +314,7 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) ``` #### Revoke permission @@ -307,6 +325,7 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) ``` #### Delete file or directory @@ -317,7 +336,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: -r, --recursive Delete directory and all it's content ``` @@ -330,7 +350,8 @@ global: -c, --config Application path, default: 
/Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: --no-header Dont print the header ``` @@ -343,7 +364,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: --order Sort order. See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy --path-width Width of path column, default: 60, minimum: 9, use 0 for full width @@ -359,7 +381,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: --keep-remote Keep remote file when a conflict is encountered --keep-local Keep local file when a conflict is encountered @@ -378,7 +401,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: --keep-remote Keep remote file when a conflict is encountered --keep-local Keep local file when a conflict is encountered @@ -398,7 +422,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: -m, --max Max changes to list, default: 100 --since Page token to start listing changes from @@ -415,7 +440,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: --name-width Width of name column, default: 40, minimum: 9, use 0 for full width --no-header Dont print the header @@ -430,7 +456,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived 
requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: -f, --force Overwrite existing file --no-progress Hide progress @@ -447,6 +474,7 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) ``` #### Upload and convert file to a google document, see 'about import' for available conversions @@ -457,7 +485,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents --no-progress Hide progress @@ -471,7 +500,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: -f, --force Overwrite existing file --mime Mime type of exported file @@ -486,7 +516,8 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) - + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) + options: --bytes Show size in bytes ``` @@ -499,6 +530,7 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) ``` #### Show supported export formats @@ -509,6 +541,7 @@ global: -c, --config Application path, default: /Users//.gdrive --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) + --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) ``` diff --git a/auth/file_source.go b/auth/file_source.go index 52002033..11e73250 100644 --- a/auth/file_source.go +++ b/auth/file_source.go @@ -31,7 +31,7 @@ func (self *fileSource) Token() 
(*oauth2.Token, error) { return token, nil } -func ReadToken(path string) (*oauth2.Token, bool, error) { +func ReadFile(path string) ([]byte, bool, error) { if !fileExists(path) { return nil, false, nil } @@ -40,8 +40,19 @@ func ReadToken(path string) (*oauth2.Token, bool, error) { if err != nil { return nil, true, err } + return content, true, nil +} + + +func ReadToken(path string) (*oauth2.Token, bool, error) { + + content, exists, err := ReadFile(path) + if err != nil { + return nil, exists, err + } + token := &oauth2.Token{} - return token, true, json.Unmarshal(content, token) + return token, exists, json.Unmarshal(content, token) } func SaveToken(path string, token *oauth2.Token) error { diff --git a/auth/oauth.go b/auth/oauth.go index 150642cc..bc567385 100644 --- a/auth/oauth.go +++ b/auth/oauth.go @@ -3,6 +3,7 @@ package auth import ( "fmt" "golang.org/x/oauth2" + "golang.org/x/oauth2/google" "net/http" "time" ) @@ -64,6 +65,23 @@ func NewAccessTokenClient(clientId, clientSecret, accessToken string) *http.Clie ) } +func NewServiceAccountClient(serviceAccountFile string) (*http.Client, error) { + content, exists, err := ReadFile(serviceAccountFile) + if(!exists) { + return nil, fmt.Errorf("Service account filename %q not found", serviceAccountFile) + } + + if(err != nil) { + return nil, err + } + + conf, err := google.JWTConfigFromJSON(content, "https://www.googleapis.com/auth/drive") + if(err != nil) { + return nil, err + } + return conf.Client(oauth2.NoContext), nil +} + func getConfig(clientId, clientSecret string) *oauth2.Config { return &oauth2.Config{ ClientID: clientId, diff --git a/gdrive.go b/gdrive.go index 971dc4c4..c1a817e7 100644 --- a/gdrive.go +++ b/gdrive.go @@ -40,6 +40,11 @@ func main() { Patterns: []string{"--access-token"}, Description: "Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users)", }, + cli.StringFlag{ + Name: "serviceAccount", + Patterns: []string{"--service-account"}, + Description: "Oauth service account filename, used for server to server communication without user interaction (filename path is relative to config dir)", + }, } handlers := []*cli.Handler{ diff --git a/handlers_drive.go b/handlers_drive.go index 5240566f..7bda872f 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -354,6 +354,16 @@ func getOauthClient(args cli.Arguments) (*http.Client, error) { } configDir := getConfigDir(args) + + if args.String("serviceAccount") != "" { + serviceAccountPath := ConfigFilePath(configDir, args.String("serviceAccount")) + serviceAccountClient, err := auth.NewServiceAccountClient(serviceAccountPath) + if err != nil { + return nil, err + } + return serviceAccountClient, nil + } + tokenPath := ConfigFilePath(configDir, TokenFilename) return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt) } From 2aa4234efa6e04fdeee3129acdaa3464aa170dcd Mon Sep 17 00:00:00 2001 From: Oskari Saarenmaa Date: Tue, 14 Feb 2017 21:52:26 +0200 Subject: [PATCH 182/195] auth/file_source: don't try to read non-existent files Commit b33b3e96e introduced a bug where we try to read a token file even if it doesn't exist, causing unauthenticated run (e.g. `gdrive about`) to fail with the error Failed getting oauth client: Failed to read token: unexpected end of JSON input Closes #257. 
--- auth/file_source.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auth/file_source.go b/auth/file_source.go index 11e73250..9223d1dc 100644 --- a/auth/file_source.go +++ b/auth/file_source.go @@ -47,7 +47,7 @@ func ReadFile(path string) ([]byte, bool, error) { func ReadToken(path string) (*oauth2.Token, bool, error) { content, exists, err := ReadFile(path) - if err != nil { + if err != nil || exists == false { return nil, exists, err } From c3cbcceedd6beb1fcff30f06ea7be7c29558d181 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Fri, 8 Sep 2017 22:38:46 +0200 Subject: [PATCH 183/195] Add maintenance note --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 843fccd7..4c6b5cd7 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,12 @@ gdrive ====== +## Note +This tool is not being actively maintained at the moment, ymmv. +For incremental and encrypted backup of unix systems [borg](https://github.com/borgbackup/borg) +is a great alternative and [rsync.net](http://rsync.net/products/attic.html) provides +a cheap and reliable backup target. + ## Overview gdrive is a command line utility for interacting with Google Drive. From 06117fa349db55980766e89d31b6d9afbccedd2a Mon Sep 17 00:00:00 2001 From: "M. Mucahid Benlioglu" Date: Mon, 11 Mar 2019 16:57:52 +0300 Subject: [PATCH 184/195] Updated maintenance note --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4c6b5cd7..e1a1ba14 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ gdrive ## Note -This tool is not being actively maintained at the moment, ymmv. +~~This tool is not being actively maintained at the moment, ymmv~~ **Active maintenance will be resumed soon.** For incremental and encrypted backup of unix systems [borg](https://github.com/borgbackup/borg) is a great alternative and [rsync.net](http://rsync.net/products/attic.html) provides a cheap and reliable backup target. From 29ca5a922a95b9ca1f09e1ad51bc853a50d6b888 Mon Sep 17 00:00:00 2001 From: "M. Mucahid Benlioglu" Date: Fri, 19 Apr 2019 21:50:59 +0300 Subject: [PATCH 185/195] Re-compiled binaries to update go version. Fixes #450 Signed-off-by: M. Mucahid Benlioglu --- README.md | 53 ++++++++++++++++++++++++++--------------------------- 1 file changed, 26 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index e1a1ba14..c93eda0f 100644 --- a/README.md +++ b/README.md @@ -38,33 +38,32 @@ You will be prompted for a new verification code if the folder does not exist. 
### Downloads | Filename | Version | Description | Shasum | |:-----------------------|:--------|:-------------------|:-----------------------------------------| -| [gdrive-osx-x64](https://docs.google.com/uc?id=0B3X9GlR6Embnb010SnpUV0s2ZkU&export=download) | 2.1.0 | OS X 64-bit | 297ccf3c945b364b5d306cef335ba44b0900e927 | -| [gdrive-osx-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnTjByNlNvZVNRTjQ&export=download) | 2.1.0 | OS X 32-bit | c64714676a5b028aeeaf09e5f3b84d363e0ec7ed | -| [gdrive-osx-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnbURvYnVyVmNNX2M&export=download) | 2.1.0 | OS X arm | eb23b7bb5a072497372bd253e8fc8353bec8a64c | -| [gdrive-linux-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnQ0FtZmJJUXEyRTA&export=download) | 2.1.0 | Linux 64-bit | 4fd8391b300cac45963e53da44dcfe68da08d843 | -| [gdrive-linux-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnLV92dHBpTkFhTEU&export=download) | 2.1.0 | Linux 32-bit | de9f49565fc62552fe862f08f84694ab4653adc2 | -| [gdrive-linux-rpi](https://docs.google.com/uc?id=0B3X9GlR6EmbnVXNLanp4ZFRRbzg&export=download) | 2.1.0 | Linux Raspberry Pi | e26e9ca3df3d08f970a276782ac5e92731c85467 | -| [gdrive-linux-arm64](https://docs.google.com/uc?id=0B3X9GlR6EmbnamliN0Rld01oRVk&export=download) | 2.1.0 | Linux arm 64-bit | 3d670905e13edf96d43c9f97293bdba62c740926 | -| [gdrive-linux-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnRjBaMVVLalN4cTA&export=download) | 2.1.0 | Linux arm 32-bit | 5b1036e0ef479ce228f7c32d1adfdc3840d71d10 | -| [gdrive-linux-mips64](https://docs.google.com/uc?id=0B3X9GlR6Embna2lzdEJ6blFzSzQ&export=download) | 2.1.0 | Linux mips 64-bit | 334bbd74b87fd1d05550e366724fe8e3c9e61ca4 | -| [gdrive-linux-mips64le](https://docs.google.com/uc?id=0B3X9GlR6EmbnWFk4Q3ZVZ1g3ZHM&export=download) | 2.1.0 | Linux mips 64-bit le | bb6961a2c03c074e6d34a1ec280cc69f5d5002f5 | -| [gdrive-linux-ppc64](https://docs.google.com/uc?id=0B3X9GlR6EmbnS09XMzhfRXBnUzA&export=download) | 2.1.0 | Linux PPC 64-bit | 70a1ac5be9ba819da5cf7a8dbd513805a26509ac | -| [gdrive-linux-ppc64le](https://docs.google.com/uc?id=0B3X9GlR6EmbneDJ2b3hqbVlNZnc&export=download) | 2.1.0 | Linux PPC 64-bit le | f426817ee4824b83b978f82f8e72eac6db92f2d1 | -| [gdrive-windows-386.exe](https://docs.google.com/uc?id=0B3X9GlR6EmbnV3RNeFVUQjZvS2c&export=download) | 2.1.0 | Window 32-bit | 1429200631b598543eddc3df3487117cad95adbb | -| [gdrive-windows-x64.exe](https://drive.google.com/uc?id=0B3X9GlR6EmbnbnBsTXlfS1J5UjQ&export=download) | 2.1.0 | Windows 64-bit | 17f692a027a049385af2576503cd376593cc87b7 | -| [gdrive-dragonfly-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnelNIdmRMMGpVa2s&export=download) | 2.1.0 | DragonFly BSD 64-bit | dc214a24e59f68d99ca62757d99099051f83804a | -| [gdrive-freebsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnQkN0NnUwZ0tKLXM&export=download) | 2.1.0 | FreeBSD 64-bit | 93a5581652f9c01c47fb6c16e8ae655182f265da | -| [gdrive-freebsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnNU5rbXBzeEhhOTA&export=download) | 2.1.0 | FreeBSD 32-bit | b9a3ee1e0fdbb5fa970942ab89b354ee863a5758 | -| [gdrive-freebsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnVHpUbVFzZzNqeW8&export=download) | 2.1.0 | FreeBSD arm | 7f5d1dedaa98501932ea368f2baba240da0b00d8 | -| [gdrive-netbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnbGJobnBQR0dtV2c&export=download) | 2.1.0 | NetBSD 64-bit | 2a088dbd1e149204eb71a47ade109816983fe53f | -| [gdrive-netbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbneWszMnl5RGZnYWs&export=download) | 2.1.0 | NetBSD 32-bit | 
a2c231b91839171a58da780657c445d4a1430537 | -| [gdrive-netbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnVVhOWG9UUUhWNVU&export=download) | 2.1.0 | NetBSD arm | ac8a6354f8a8346c2bf84585e14f4a2cc69451db | -| [gdrive-openbsd-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnSy1JdFlHdUYyaGs&export=download) | 2.1.0 | OpenBSD 64-bit | 54be1d38b9014c6a8de5d71233cd6f208c27ac1c | -| [gdrive-openbsd-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnRWhIZFRCNE1OdWc&export=download) | 2.1.0 | OpenBSD 32-bit | c2e08a9c7242de6d6ffa01598425fea0550076b8 | -| [gdrive-openbsd-arm](https://docs.google.com/uc?id=0B3X9GlR6EmbnWnAzMTNZanp2UEE&export=download) | 2.1.0 | OpenBSD arm | 22cd413c2705012b2ac78e64cc9f2b5bfa96dbea | -| [gdrive-solaris-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnWEtENmQ5dDJtTHc&export=download) | 2.1.0 | Solaris 64-bit | 2da03dfcc818a0bd3588ad850349a5a2554913fb | -| [gdrive-plan9-x64](https://docs.google.com/uc?id=0B3X9GlR6EmbnRmVyelhxLUUySjA&export=download) | 2.1.0 | Plan9 64-bit | 7b498ce0f416a3e8c1e17f603d21a3e84c1a9283 | -| [gdrive-plan9-386](https://docs.google.com/uc?id=0B3X9GlR6EmbnckdHZVdRZ0dZTU0&export=download) | 2.1.0 | Plan9 32-bit | cccd9ba86774bc6bd70f092158e2fcafa94601c0 | +| [gdrive-osx-x64](https://drive.google.com/uc?id=1hs1l26Kk4xwqLlUuPB8pMfv4Z4qMvM3D&export=download) | 2.1.0 | OS X 64-bit | 2d817cab0912e0efa5529ada7a4bb73fe90d2d74 | +| [gdrive-osx-386](https://drive.google.com/uc?id=1tlzNnSGK12zviZX57qiLCnOoJBLqDlIT&export=download) | 2.1.0 | OS X 32-bit | ed1a12998270f8176c96729d80f3f7307a2e2db6 | +| [gdrive-linux-x64](https://drive.google.com/uc?id=1Ej8VgsW5RgK66Btb9p74tSdHMH3p4UNb&export=download) | 2.1.0 | Linux 64-bit | 6ef7c740e980358a9a5de36c1aac7ea375319aa3 | +| [gdrive-linux-386](https://drive.google.com/uc?id=1eo9hMXz0WyuBwRxPM0LrTtQmhTgOLUlg&export=download) | 2.1.0 | Linux 32-bit | 03f423fc7a444e95de3998d24dbfbdb0335fd055 | +| [gdrive-linux-rpi](https://drive.google.com/uc?id=1f5Q-yAwUYZYt_frYjodFHbU0v7ppQkem&export=download) | 2.1.0 | Linux Raspberry Pi | 0247d36e2591ea009ddbd003300bf10cb58089da | +| [gdrive-linux-arm64](https://drive.google.com/uc?id=1tAx8hbUy4xbGAuvUFistgWZy3twk9rBP&export=download) | 2.1.0 | Linux arm 64-bit | 3a41c27e10587eef0be20fe00415c65723fa3aee | +| [gdrive-linux-arm](https://drive.google.com/uc?id=1140F8FbMxBsvhMjV4BCznnC6qiRYmOCc&export=download) | 2.1.0 | Linux arm 32-bit | 0247d36e2591ea009ddbd003300bf10cb58089da | +| [gdrive-linux-mips64](https://drive.google.com/uc?id=1evxhgWwtb-qDqnUdk_EIr67pM9--1njT&export=download) | 2.1.0 | Linux mips 64-bit | 69bdf4b9a4ae1f9ead9d35ba118a70005bea3829 | +| [gdrive-linux-mips64le](https://drive.google.com/uc?id=1ujljxt_Psl9Kqlsnv4_wgHv4JzvaJEH_&export=download) | 2.1.0 | Linux mips 64-bit le | 421ae3662ed9b486649560512e3c8a43dbf6cdc6 | +| [gdrive-linux-ppc64](https://drive.google.com/uc?id=1wnDeGUkiixmBaWkEgiXL-LcyaRqpFcUl&export=download) | 2.1.0 | Linux PPC 64-bit | 9b1621d1656fa8e4f2be21202708ab3bb8c0aa77 | +| [gdrive-linux-ppc64le](https://drive.google.com/uc?id=1iv7qtJom61MPRl-w-0gTqpPr3A41Whfo&export=download) | 2.1.0 | Linux PPC 64-bit le | 0bf501abb505f851a7592456db304f2c4430e9cd | +| [gdrive-windows-386.exe](https://drive.google.com/uc?id=1nbwcqKI1ohhjbUKLH2NcPSRKvYdBwfEI&export=download) | 2.1.0 | Window 32-bit | 4974290122f635d9c71875bc2c760e46b943c5ab | +| [gdrive-windows-x64.exe](https://drive.google.com/uc?id=1zEaLExCMQnwftSYCF7GTONHX-lV7SlTe&export=download) | 2.1.0 | Windows 64-bit | 53780f9a4168c71fd9f3d429932207069d1dcf03 | +| 
[gdrive-dragonfly-x64](https://drive.google.com/uc?id=1EQc5um6eStZguz5_XX3o-zFFucVlha6V&export=download) | 2.1.0 | DragonFly BSD 64-bit | 5871ee54f992b71b3a1586ec224d93a79265a8b4 | +| [gdrive-freebsd-x64](https://drive.google.com/uc?id=1qVz1-PWdkQHQIJ6MH6X5Z6ZftA1Q-SMT&export=download) | 2.1.0 | FreeBSD 64-bit | ad4d16b0144b8fd05ef7f3174a54fe39b04a766d | +| [gdrive-freebsd-386](https://drive.google.com/uc?id=1NeXS64JMg4Sfr-Szu-MQOh-hg3LX5TYJ&export=download) | 2.1.0 | FreeBSD 32-bit | cc636c839434689b73274e3574ac8477c9542880 | +| [gdrive-freebsd-arm](https://drive.google.com/uc?id=1rrdVWNFcPjx517M04H-V2zt-4gxO3dzQ&export=download) | 2.1.0 | FreeBSD arm | 19abe4b23ad7dd968d0089313eca3108601dca02 | +| [gdrive-netbsd-x64](https://drive.google.com/uc?id=1EOvpqad6Aeh3UuszaOD4-D3mTm-IZwI2&export=download) | 2.1.0 | NetBSD 64-bit | 07af67d4a149a926ee9ccf82591a593eb2330c36 | +| [gdrive-netbsd-386](https://drive.google.com/uc?id=1DKTa2nV6LTs62jFPEwoxjVBdHFouZRC7&export=download) | 2.1.0 | NetBSD 32-bit | 95a090ff3ecb8b6e2180afb02bec6ba7419a33e0 | +| [gdrive-netbsd-arm](https://drive.google.com/uc?id=1lPJd6EgjzBoELJo7Z68ilpJZX2kiwYfr&export=download) | 2.1.0 | NetBSD arm | b953d575f2c306053e31718050698c47388dea26 | +| [gdrive-openbsd-x64](https://drive.google.com/uc?id=1xjpuwbOEuKiks0bIomtt-uYajysrR3Lc&export=download) | 2.1.0 | OpenBSD 64-bit | ac8dbed27ffd7ed0b13b2abb8be4b5ca72da9d7c | +| [gdrive-openbsd-386](https://drive.google.com/uc?id=1sBlO73K8HdyRl1j1GE9QVUDvxGKxjspB&export=download) | 2.1.0 | OpenBSD 32-bit | d7a2d1441d3b41e36da080643c3d5f6306181eae | +| [gdrive-openbsd-arm](https://drive.google.com/uc?id=1-K0eFoNw6fpgw_UINj5ViEuDDjwlOy0A&export=download) | 2.1.0 | OpenBSD arm | 9e508032367471515199f6787aa771e271d307f4 | +| [gdrive-solaris-x64](https://drive.google.com/uc?id=1UNvH_Nj54qR3UCTszXWqtKiD-yqqsnwe&export=download) | 2.1.0 | Solaris 64-bit | c18612da275065064aa650b669de7140dcd94a5f | +| [gdrive-plan9-x64](https://drive.google.com/uc?id=1Es1R2trxekHkk_Fj7vAfd0PxJW2AErHV&export=download) | 2.1.0 | Plan9 64-bit | 8907f5ffb8c5d6b2e15cd9d8f5a51ef762173298 | +| [gdrive-plan9-386](https://drive.google.com/uc?id=19HxOhQDjI_nsMBUQ7Rs79oBhWYeMOX3b&export=download) | 2.1.0 | Plan9 32-bit | f60b749ba57b8b2d824d06a7bd788a8c5808a607 | ## Compile from source ```bash From 8e12e1c1c961954c162d9bc6c6ddb4661850465d Mon Sep 17 00:00:00 2001 From: "M. Mucahid Benlioglu" Date: Mon, 29 Jul 2019 05:43:29 +0300 Subject: [PATCH 186/195] Update README.md Migrated compiled binaries to Github releases. Fixes #479 --- README.md | 54 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index c93eda0f..9e7befb7 100644 --- a/README.md +++ b/README.md @@ -36,34 +36,36 @@ Example: `GDRIVE_CONFIG_DIR="/home/user/.gdrive-secondary" gdrive list` You will be prompted for a new verification code if the folder does not exist. ### Downloads +Check our [releases](https://github.com/gdrive-org/gdrive/releases) section. 
+ | Filename | Version | Description | Shasum | |:-----------------------|:--------|:-------------------|:-----------------------------------------| -| [gdrive-osx-x64](https://drive.google.com/uc?id=1hs1l26Kk4xwqLlUuPB8pMfv4Z4qMvM3D&export=download) | 2.1.0 | OS X 64-bit | 2d817cab0912e0efa5529ada7a4bb73fe90d2d74 | -| [gdrive-osx-386](https://drive.google.com/uc?id=1tlzNnSGK12zviZX57qiLCnOoJBLqDlIT&export=download) | 2.1.0 | OS X 32-bit | ed1a12998270f8176c96729d80f3f7307a2e2db6 | -| [gdrive-linux-x64](https://drive.google.com/uc?id=1Ej8VgsW5RgK66Btb9p74tSdHMH3p4UNb&export=download) | 2.1.0 | Linux 64-bit | 6ef7c740e980358a9a5de36c1aac7ea375319aa3 | -| [gdrive-linux-386](https://drive.google.com/uc?id=1eo9hMXz0WyuBwRxPM0LrTtQmhTgOLUlg&export=download) | 2.1.0 | Linux 32-bit | 03f423fc7a444e95de3998d24dbfbdb0335fd055 | -| [gdrive-linux-rpi](https://drive.google.com/uc?id=1f5Q-yAwUYZYt_frYjodFHbU0v7ppQkem&export=download) | 2.1.0 | Linux Raspberry Pi | 0247d36e2591ea009ddbd003300bf10cb58089da | -| [gdrive-linux-arm64](https://drive.google.com/uc?id=1tAx8hbUy4xbGAuvUFistgWZy3twk9rBP&export=download) | 2.1.0 | Linux arm 64-bit | 3a41c27e10587eef0be20fe00415c65723fa3aee | -| [gdrive-linux-arm](https://drive.google.com/uc?id=1140F8FbMxBsvhMjV4BCznnC6qiRYmOCc&export=download) | 2.1.0 | Linux arm 32-bit | 0247d36e2591ea009ddbd003300bf10cb58089da | -| [gdrive-linux-mips64](https://drive.google.com/uc?id=1evxhgWwtb-qDqnUdk_EIr67pM9--1njT&export=download) | 2.1.0 | Linux mips 64-bit | 69bdf4b9a4ae1f9ead9d35ba118a70005bea3829 | -| [gdrive-linux-mips64le](https://drive.google.com/uc?id=1ujljxt_Psl9Kqlsnv4_wgHv4JzvaJEH_&export=download) | 2.1.0 | Linux mips 64-bit le | 421ae3662ed9b486649560512e3c8a43dbf6cdc6 | -| [gdrive-linux-ppc64](https://drive.google.com/uc?id=1wnDeGUkiixmBaWkEgiXL-LcyaRqpFcUl&export=download) | 2.1.0 | Linux PPC 64-bit | 9b1621d1656fa8e4f2be21202708ab3bb8c0aa77 | -| [gdrive-linux-ppc64le](https://drive.google.com/uc?id=1iv7qtJom61MPRl-w-0gTqpPr3A41Whfo&export=download) | 2.1.0 | Linux PPC 64-bit le | 0bf501abb505f851a7592456db304f2c4430e9cd | -| [gdrive-windows-386.exe](https://drive.google.com/uc?id=1nbwcqKI1ohhjbUKLH2NcPSRKvYdBwfEI&export=download) | 2.1.0 | Window 32-bit | 4974290122f635d9c71875bc2c760e46b943c5ab | -| [gdrive-windows-x64.exe](https://drive.google.com/uc?id=1zEaLExCMQnwftSYCF7GTONHX-lV7SlTe&export=download) | 2.1.0 | Windows 64-bit | 53780f9a4168c71fd9f3d429932207069d1dcf03 | -| [gdrive-dragonfly-x64](https://drive.google.com/uc?id=1EQc5um6eStZguz5_XX3o-zFFucVlha6V&export=download) | 2.1.0 | DragonFly BSD 64-bit | 5871ee54f992b71b3a1586ec224d93a79265a8b4 | -| [gdrive-freebsd-x64](https://drive.google.com/uc?id=1qVz1-PWdkQHQIJ6MH6X5Z6ZftA1Q-SMT&export=download) | 2.1.0 | FreeBSD 64-bit | ad4d16b0144b8fd05ef7f3174a54fe39b04a766d | -| [gdrive-freebsd-386](https://drive.google.com/uc?id=1NeXS64JMg4Sfr-Szu-MQOh-hg3LX5TYJ&export=download) | 2.1.0 | FreeBSD 32-bit | cc636c839434689b73274e3574ac8477c9542880 | -| [gdrive-freebsd-arm](https://drive.google.com/uc?id=1rrdVWNFcPjx517M04H-V2zt-4gxO3dzQ&export=download) | 2.1.0 | FreeBSD arm | 19abe4b23ad7dd968d0089313eca3108601dca02 | -| [gdrive-netbsd-x64](https://drive.google.com/uc?id=1EOvpqad6Aeh3UuszaOD4-D3mTm-IZwI2&export=download) | 2.1.0 | NetBSD 64-bit | 07af67d4a149a926ee9ccf82591a593eb2330c36 | -| [gdrive-netbsd-386](https://drive.google.com/uc?id=1DKTa2nV6LTs62jFPEwoxjVBdHFouZRC7&export=download) | 2.1.0 | NetBSD 32-bit | 95a090ff3ecb8b6e2180afb02bec6ba7419a33e0 | -| 
[gdrive-netbsd-arm](https://drive.google.com/uc?id=1lPJd6EgjzBoELJo7Z68ilpJZX2kiwYfr&export=download) | 2.1.0 | NetBSD arm | b953d575f2c306053e31718050698c47388dea26 | -| [gdrive-openbsd-x64](https://drive.google.com/uc?id=1xjpuwbOEuKiks0bIomtt-uYajysrR3Lc&export=download) | 2.1.0 | OpenBSD 64-bit | ac8dbed27ffd7ed0b13b2abb8be4b5ca72da9d7c | -| [gdrive-openbsd-386](https://drive.google.com/uc?id=1sBlO73K8HdyRl1j1GE9QVUDvxGKxjspB&export=download) | 2.1.0 | OpenBSD 32-bit | d7a2d1441d3b41e36da080643c3d5f6306181eae | -| [gdrive-openbsd-arm](https://drive.google.com/uc?id=1-K0eFoNw6fpgw_UINj5ViEuDDjwlOy0A&export=download) | 2.1.0 | OpenBSD arm | 9e508032367471515199f6787aa771e271d307f4 | -| [gdrive-solaris-x64](https://drive.google.com/uc?id=1UNvH_Nj54qR3UCTszXWqtKiD-yqqsnwe&export=download) | 2.1.0 | Solaris 64-bit | c18612da275065064aa650b669de7140dcd94a5f | -| [gdrive-plan9-x64](https://drive.google.com/uc?id=1Es1R2trxekHkk_Fj7vAfd0PxJW2AErHV&export=download) | 2.1.0 | Plan9 64-bit | 8907f5ffb8c5d6b2e15cd9d8f5a51ef762173298 | -| [gdrive-plan9-386](https://drive.google.com/uc?id=19HxOhQDjI_nsMBUQ7Rs79oBhWYeMOX3b&export=download) | 2.1.0 | Plan9 32-bit | f60b749ba57b8b2d824d06a7bd788a8c5808a607 | +| [gdrive-osx-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-osx-x64) | 2.1.0 | OS X 64-bit | 2d817cab0912e0efa5529ada7a4bb73fe90d2d74 | +| [gdrive-osx-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-osx-386) | 2.1.0 | OS X 32-bit | ed1a12998270f8176c96729d80f3f7307a2e2db6 | +| [gdrive-linux-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-x64) | 2.1.0 | Linux 64-bit | 6ef7c740e980358a9a5de36c1aac7ea375319aa3 | +| [gdrive-linux-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-386) | 2.1.0 | Linux 32-bit | 03f423fc7a444e95de3998d24dbfbdb0335fd055 | +| [gdrive-linux-rpi](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-rpi) | 2.1.0 | Linux Raspberry Pi | 0247d36e2591ea009ddbd003300bf10cb58089da | +| [gdrive-linux-arm64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-arm64) | 2.1.0 | Linux arm 64-bit | 3a41c27e10587eef0be20fe00415c65723fa3aee | +| [gdrive-linux-arm](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-arm) | 2.1.0 | Linux arm 32-bit | 0247d36e2591ea009ddbd003300bf10cb58089da | +| [gdrive-linux-mips64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-mips64) | 2.1.0 | Linux mips 64-bit | 69bdf4b9a4ae1f9ead9d35ba118a70005bea3829 | +| [gdrive-linux-mips64le](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-mips64le) | 2.1.0 | Linux mips 64-bit le | 421ae3662ed9b486649560512e3c8a43dbf6cdc6 | +| [gdrive-linux-ppc64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-ppc64) | 2.1.0 | Linux PPC 64-bit | 9b1621d1656fa8e4f2be21202708ab3bb8c0aa77 | +| [gdrive-linux-ppc64le](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-ppc64le) | 2.1.0 | Linux PPC 64-bit le | 0bf501abb505f851a7592456db304f2c4430e9cd | +| [gdrive-windows-386.exe](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-windows-386.exe) | 2.1.0 | Window 32-bit | 4974290122f635d9c71875bc2c760e46b943c5ab | +| [gdrive-windows-x64.exe](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-windows-x64.exe) | 2.1.0 | Windows 64-bit | 53780f9a4168c71fd9f3d429932207069d1dcf03 | +| 
[gdrive-dragonfly-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-dragonfly-x64) | 2.1.0 | DragonFly BSD 64-bit | 5871ee54f992b71b3a1586ec224d93a79265a8b4 | +| [gdrive-freebsd-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-freebsd-x64) | 2.1.0 | FreeBSD 64-bit | ad4d16b0144b8fd05ef7f3174a54fe39b04a766d | +| [gdrive-freebsd-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-freebsd-386) | 2.1.0 | FreeBSD 32-bit | cc636c839434689b73274e3574ac8477c9542880 | +| [gdrive-freebsd-arm](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-freebsd-arm) | 2.1.0 | FreeBSD arm | 19abe4b23ad7dd968d0089313eca3108601dca02 | +| [gdrive-netbsd-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-netbsd-x64) | 2.1.0 | NetBSD 64-bit | 07af67d4a149a926ee9ccf82591a593eb2330c36 | +| [gdrive-netbsd-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-netbsd-386) | 2.1.0 | NetBSD 32-bit | 95a090ff3ecb8b6e2180afb02bec6ba7419a33e0 | +| [gdrive-netbsd-arm](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-netbsd-arm) | 2.1.0 | NetBSD arm | b953d575f2c306053e31718050698c47388dea26 | +| [gdrive-openbsd-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-openbsd-x64) | 2.1.0 | OpenBSD 64-bit | ac8dbed27ffd7ed0b13b2abb8be4b5ca72da9d7c | +| [gdrive-openbsd-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-openbsd-386) | 2.1.0 | OpenBSD 32-bit | d7a2d1441d3b41e36da080643c3d5f6306181eae | +| [gdrive-openbsd-arm](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-openbsd-arm) | 2.1.0 | OpenBSD arm | 9e508032367471515199f6787aa771e271d307f4 | +| [gdrive-solaris-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-solaris-x64) | 2.1.0 | Solaris 64-bit | c18612da275065064aa650b669de7140dcd94a5f | +| [gdrive-plan9-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-plan9-x64) | 2.1.0 | Plan9 64-bit | 8907f5ffb8c5d6b2e15cd9d8f5a51ef762173298 | +| [gdrive-plan9-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-plan9-386) | 2.1.0 | Plan9 32-bit | f60b749ba57b8b2d824d06a7bd788a8c5808a607 | ## Compile from source ```bash From 31d0829c180795d17e00b7a354fffe4d72be712b Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Tue, 28 Jul 2020 21:14:37 +0200 Subject: [PATCH 187/195] Add note about not being maintained anymore --- README.md | 39 ++------------------------------------- 1 file changed, 2 insertions(+), 37 deletions(-) diff --git a/README.md b/README.md index 9e7befb7..2be4bc9a 100644 --- a/README.md +++ b/README.md @@ -2,11 +2,8 @@ gdrive ====== -## Note -~~This tool is not being actively maintained at the moment, ymmv~~ **Active maintenance will be resumed soon.** -For incremental and encrypted backup of unix systems [borg](https://github.com/borgbackup/borg) -is a great alternative and [rsync.net](http://rsync.net/products/attic.html) provides -a cheap and reliable backup target. +## Important +This tool is no longer maintained. ## Overview gdrive is a command line utility for interacting with Google Drive. @@ -35,38 +32,6 @@ or set the environment variable `GDRIVE_CONFIG_DIR`. Example: `GDRIVE_CONFIG_DIR="/home/user/.gdrive-secondary" gdrive list` You will be prompted for a new verification code if the folder does not exist. -### Downloads -Check our [releases](https://github.com/gdrive-org/gdrive/releases) section. 
- -| Filename | Version | Description | Shasum | -|:-----------------------|:--------|:-------------------|:-----------------------------------------| -| [gdrive-osx-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-osx-x64) | 2.1.0 | OS X 64-bit | 2d817cab0912e0efa5529ada7a4bb73fe90d2d74 | -| [gdrive-osx-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-osx-386) | 2.1.0 | OS X 32-bit | ed1a12998270f8176c96729d80f3f7307a2e2db6 | -| [gdrive-linux-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-x64) | 2.1.0 | Linux 64-bit | 6ef7c740e980358a9a5de36c1aac7ea375319aa3 | -| [gdrive-linux-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-386) | 2.1.0 | Linux 32-bit | 03f423fc7a444e95de3998d24dbfbdb0335fd055 | -| [gdrive-linux-rpi](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-rpi) | 2.1.0 | Linux Raspberry Pi | 0247d36e2591ea009ddbd003300bf10cb58089da | -| [gdrive-linux-arm64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-arm64) | 2.1.0 | Linux arm 64-bit | 3a41c27e10587eef0be20fe00415c65723fa3aee | -| [gdrive-linux-arm](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-arm) | 2.1.0 | Linux arm 32-bit | 0247d36e2591ea009ddbd003300bf10cb58089da | -| [gdrive-linux-mips64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-mips64) | 2.1.0 | Linux mips 64-bit | 69bdf4b9a4ae1f9ead9d35ba118a70005bea3829 | -| [gdrive-linux-mips64le](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-mips64le) | 2.1.0 | Linux mips 64-bit le | 421ae3662ed9b486649560512e3c8a43dbf6cdc6 | -| [gdrive-linux-ppc64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-ppc64) | 2.1.0 | Linux PPC 64-bit | 9b1621d1656fa8e4f2be21202708ab3bb8c0aa77 | -| [gdrive-linux-ppc64le](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-linux-ppc64le) | 2.1.0 | Linux PPC 64-bit le | 0bf501abb505f851a7592456db304f2c4430e9cd | -| [gdrive-windows-386.exe](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-windows-386.exe) | 2.1.0 | Window 32-bit | 4974290122f635d9c71875bc2c760e46b943c5ab | -| [gdrive-windows-x64.exe](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-windows-x64.exe) | 2.1.0 | Windows 64-bit | 53780f9a4168c71fd9f3d429932207069d1dcf03 | -| [gdrive-dragonfly-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-dragonfly-x64) | 2.1.0 | DragonFly BSD 64-bit | 5871ee54f992b71b3a1586ec224d93a79265a8b4 | -| [gdrive-freebsd-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-freebsd-x64) | 2.1.0 | FreeBSD 64-bit | ad4d16b0144b8fd05ef7f3174a54fe39b04a766d | -| [gdrive-freebsd-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-freebsd-386) | 2.1.0 | FreeBSD 32-bit | cc636c839434689b73274e3574ac8477c9542880 | -| [gdrive-freebsd-arm](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-freebsd-arm) | 2.1.0 | FreeBSD arm | 19abe4b23ad7dd968d0089313eca3108601dca02 | -| [gdrive-netbsd-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-netbsd-x64) | 2.1.0 | NetBSD 64-bit | 07af67d4a149a926ee9ccf82591a593eb2330c36 | -| [gdrive-netbsd-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-netbsd-386) | 2.1.0 | NetBSD 32-bit | 95a090ff3ecb8b6e2180afb02bec6ba7419a33e0 | -| 
[gdrive-netbsd-arm](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-netbsd-arm) | 2.1.0 | NetBSD arm | b953d575f2c306053e31718050698c47388dea26 | -| [gdrive-openbsd-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-openbsd-x64) | 2.1.0 | OpenBSD 64-bit | ac8dbed27ffd7ed0b13b2abb8be4b5ca72da9d7c | -| [gdrive-openbsd-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-openbsd-386) | 2.1.0 | OpenBSD 32-bit | d7a2d1441d3b41e36da080643c3d5f6306181eae | -| [gdrive-openbsd-arm](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-openbsd-arm) | 2.1.0 | OpenBSD arm | 9e508032367471515199f6787aa771e271d307f4 | -| [gdrive-solaris-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-solaris-x64) | 2.1.0 | Solaris 64-bit | c18612da275065064aa650b669de7140dcd94a5f | -| [gdrive-plan9-x64](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-plan9-x64) | 2.1.0 | Plan9 64-bit | 8907f5ffb8c5d6b2e15cd9d8f5a51ef762173298 | -| [gdrive-plan9-386](https://github.com/gdrive-org/gdrive/releases/download/2.1.0/gdrive-plan9-386) | 2.1.0 | Plan9 32-bit | f60b749ba57b8b2d824d06a7bd788a8c5808a607 | - ## Compile from source ```bash go get github.com/prasmussen/gdrive From c68bdf07f3d88fab68b7c790b02de50d4cca1a78 Mon Sep 17 00:00:00 2001 From: Cypherpunk Samurai <66906402+CypherpunkSamurai@users.noreply.github.com> Date: Tue, 22 Sep 2020 01:04:09 +0530 Subject: [PATCH 188/195] Added Automated Release Github Action Automatically compiles binaries for most common platforms and adds them to the release --- .github/workflows/release.yaml | 89 ++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 .github/workflows/release.yaml diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 00000000..4b4ef3b9 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,89 @@ +# .github/workflows/release.yaml + +on: release +name: Build Release +jobs: + release-linux-386: + name: release linux/386 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: compile and release + uses: ngs/go-release.action@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GOARCH: "386" + GOOS: linux + EXTRA_FILES: "LICENSE" + release-linux-amd64: + name: release linux/amd64 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: compile and release + uses: ngs/go-release.action@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GOARCH: amd64 + GOOS: linux + EXTRA_FILES: "LICENSE" + release-linux-arm: + name: release linux/386 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: compile and release + uses: ngs/go-release.action@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GOARCH: "arm" + GOOS: linux + EXTRA_FILES: "LICENSE" + release-linux-arm64: + name: release linux/amd64 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: compile and release + uses: ngs/go-release.action@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GOARCH: arm64 + GOOS: linux + EXTRA_FILES: "LICENSE" + release-darwin-amd64: + name: release darwin/amd64 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: compile and release + uses: ngs/go-release.action@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GOARCH: amd64 + GOOS: darwin + EXTRA_FILES: "LICENSE" + release-windows-386: + name: release windows/386 + runs-on: 
ubuntu-latest + steps: + - uses: actions/checkout@master + - name: compile and release + uses: ngs/go-release.action@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GOARCH: "386" + GOOS: windows + EXTRA_FILES: "LICENSE" + release-windows-amd64: + name: release windows/amd64 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: compile and release + uses: ngs/go-release.action@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GOARCH: amd64 + GOOS: windows + EXTRA_FILES: "LICENSE" From 64d2ff883948f0ac12eb521f74400bdafd3ca67f Mon Sep 17 00:00:00 2001 From: Josh Schriever Date: Fri, 5 Mar 2021 09:02:23 -0600 Subject: [PATCH 189/195] update path to sabhiram/go-gitignore see https://github.com/sabhiram/go-gitignore/commit/24b2447174be370e56ea86a72f00a76258606067 --- drive/sync.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drive/sync.go b/drive/sync.go index 35ab16eb..4b2d08b9 100644 --- a/drive/sync.go +++ b/drive/sync.go @@ -2,7 +2,7 @@ package drive import ( "fmt" - "github.com/sabhiram/go-git-ignore" + "github.com/sabhiram/go-gitignore" "github.com/soniakeys/graph" "google.golang.org/api/drive/v3" "google.golang.org/api/googleapi" From bd18a2221e2b558e1ebca3aae4d936596a236551 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Fri, 28 May 2021 23:50:56 +0200 Subject: [PATCH 190/195] Add news about being verified for sensitive scopes --- README.md | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 2be4bc9a..b109ba31 100644 --- a/README.md +++ b/README.md @@ -2,12 +2,13 @@ gdrive ====== -## Important -This tool is no longer maintained. - ## Overview gdrive is a command line utility for interacting with Google Drive. +## News +#### 28.05.2021 +gdrive is finally verified for using sensitive scopes which should fix the `This app is blocked` error. + ## Prerequisites None, binaries are statically linked. If you want to compile from source you need the [go toolchain](http://golang.org/doc/install). @@ -39,14 +40,8 @@ go get github.com/prasmussen/gdrive The gdrive binary should now be available at `$GOPATH/bin/gdrive` -## Gdrive 2 -Gdrive 2 is more or less a full rewrite and is not backwards compatible -with gdrive 1 as all the command line arguments has changed slightly. -Gdrive 2 uses version 3 of the google drive api and my google-api-go-client -fork is no longer needed. - ### Syncing -Gdrive 2 supports basic syncing. It only syncs one way at the time and works +Gdrive supports basic syncing. It only syncs one way at the time and works more like rsync than e.g. dropbox. Files that are synced to google drive are tagged with an appProperty so that the files on drive can be traversed faster. This means that you can't upload files with `gdrive upload` into From 281532066b4beac404803967bf1478bb0c13327f Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Fri, 28 May 2021 23:56:01 +0200 Subject: [PATCH 191/195] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b109ba31..14c41ae3 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ gdrive is a command line utility for interacting with Google Drive. ## News #### 28.05.2021 -gdrive is finally verified for using sensitive scopes which should fix the `This app is blocked` error. +gdrive is finally verified for using sensitive scopes which should fix the `This app is blocked` error. 
Note that the project name will show up as `project-367116221053` when granting access to you account. (I don't dare to change any more settings in the google console.) ## Prerequisites None, binaries are statically linked. From b4b994c6e43114e881be2a81818db42f81a2636d Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Fri, 28 May 2021 23:57:42 +0200 Subject: [PATCH 192/195] Bumb version --- gdrive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gdrive.go b/gdrive.go index c1a817e7..a505d789 100644 --- a/gdrive.go +++ b/gdrive.go @@ -8,7 +8,7 @@ import ( ) const Name = "gdrive" -const Version = "2.1.0" +const Version = "2.1.1" const DefaultMaxFiles = 30 const DefaultMaxChanges = 100 From fb08fe2ff9c0eab821a5d78fe7ee8fd7a37aeac8 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sat, 29 May 2021 00:41:46 +0200 Subject: [PATCH 193/195] Link to release downloads --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 14c41ae3..46630f6b 100644 --- a/README.md +++ b/README.md @@ -20,8 +20,7 @@ Version 1.5 or higher. brew install gdrive ``` ### Other -Download `gdrive` from one of the links below. On unix systems -run `chmod +x gdrive` after download to make the binary executable. +Download `gdrive` from one of the [links in the latest release](https://github.com/prasmussen/gdrive/releases). The first time gdrive is launched (i.e. run `gdrive about` in your terminal not just `gdrive`), you will be prompted for a verification code. The code is obtained by following the printed url and authenticating with the From c12170d37eeb86ac30626e38a109f63ecb2dbd85 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Sun, 1 Jan 2023 21:27:04 +0100 Subject: [PATCH 194/195] Add news about gdrive 3.0 --- README.md | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 46630f6b..5509a8a3 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,11 @@ -gdrive -====== - +## News +#### 01.01.2023 +Work has started on [Gdrive 3.0](https://github.com/glotlabs/gdrive) which will replace this project. +It is not quite ready yet, but you can follow the progress. -## Overview -gdrive is a command line utility for interacting with Google Drive. +Gdrive3 will only implement the most requested functionality. +If you have specific features that you want implemented please create an issue or join the discord to dicuss. -## News -#### 28.05.2021 -gdrive is finally verified for using sensitive scopes which should fix the `This app is blocked` error. Note that the project name will show up as `project-367116221053` when granting access to you account. (I don't dare to change any more settings in the google console.) ## Prerequisites None, binaries are statically linked. From ab270856c83aa5d8be5e537f71505f4bd27e99f5 Mon Sep 17 00:00:00 2001 From: Petter Rasmussen Date: Wed, 19 Apr 2023 20:46:46 +0200 Subject: [PATCH 195/195] Add note about not being maintained --- README.md | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 5509a8a3..6a57202a 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,25 @@ -## News -#### 01.01.2023 -Work has started on [Gdrive 3.0](https://github.com/glotlabs/gdrive) which will replace this project. -It is not quite ready yet, but you can follow the progress. +## IMPORTANT +This repository is not maintained anymore. [Gdrive 3](https://github.com/glotlabs/gdrive) is its successor. 
+ +``` + + + + + + + -Gdrive3 will only implement the most requested functionality. -If you have specific features that you want implemented please create an issue or join the discord to dicuss. + + + + + + +``` + ## Prerequisites None, binaries are statically linked. If you want to compile from source you need the [go toolchain](http://golang.org/doc/install).