diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index e6f80325..53dcff26 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,48 +1,78 @@ { "ImportPath": "github.com/prasmussen/gdrive", - "GoVersion": "go1.6", - "GodepVersion": "v61", + "GoVersion": "go1.10", + "GodepVersion": "v80", + "Packages": [ + "./..." + ], "Deps": [ + { + "ImportPath": "cloud.google.com/go/compute/metadata", + "Comment": "v0.35.1-5-gb2f67320", + "Rev": "b2f67320b4ef8ea4a3b762e54394254d11fa2844" + }, { "ImportPath": "github.com/sabhiram/go-git-ignore", - "Rev": "228fcfa2a06e870a3ef238d54c45ea847f492a37" + "Comment": "1.0.2", + "Rev": "d3107576ba9425fc1c85f4b3569c4631b805a02e" + }, + { + "ImportPath": "github.com/soniakeys/bits", + "Comment": "v1.0.0", + "Rev": "6d078272e8c4c60e9a39dbd1415f6ca61ee30be7" }, { "ImportPath": "github.com/soniakeys/graph", - "Comment": "svg-v0-58-gc265d96", - "Rev": "c265d9676750b13b9520ba4ad4f8359fa1aed9fd" + "Comment": "v0.0.0-27-g7b5d1f6", + "Rev": "7b5d1f6e4fe06231cf5c9a55229321b73e167913" }, { "ImportPath": "golang.org/x/net/context", - "Rev": "fb93926129b8ec0056f2f458b1f519654814edf0" + "Rev": "d26f9f9a57f3fab6a695bec0d84433c2c50f8bbf" }, { "ImportPath": "golang.org/x/net/context/ctxhttp", - "Rev": "fb93926129b8ec0056f2f458b1f519654814edf0" + "Rev": "d26f9f9a57f3fab6a695bec0d84433c2c50f8bbf" }, { "ImportPath": "golang.org/x/oauth2", - "Rev": "7e9cd5d59563851383f8f81a7fbb01213709387c" + "Rev": "99b60b757ec124ebb7d6b7e97f153b19c10ce163" + }, + { + "ImportPath": "golang.org/x/oauth2/google", + "Rev": "99b60b757ec124ebb7d6b7e97f153b19c10ce163" }, { "ImportPath": "golang.org/x/oauth2/internal", - "Rev": "7e9cd5d59563851383f8f81a7fbb01213709387c" + "Rev": "99b60b757ec124ebb7d6b7e97f153b19c10ce163" + }, + { + "ImportPath": "golang.org/x/oauth2/jws", + "Rev": "99b60b757ec124ebb7d6b7e97f153b19c10ce163" + }, + { + "ImportPath": "golang.org/x/oauth2/jwt", + "Rev": "99b60b757ec124ebb7d6b7e97f153b19c10ce163" }, { "ImportPath": "google.golang.org/api/drive/v3", - "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + "Comment": "v0.1.0-17-gd236112f5", + "Rev": "d236112f57134c870530b511f69766d9805df835" }, { "ImportPath": "google.golang.org/api/gensupport", - "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + "Comment": "v0.1.0-17-gd236112f5", + "Rev": "d236112f57134c870530b511f69766d9805df835" }, { "ImportPath": "google.golang.org/api/googleapi", - "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + "Comment": "v0.1.0-17-gd236112f5", + "Rev": "d236112f57134c870530b511f69766d9805df835" }, { "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", - "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + "Comment": "v0.1.0-17-gd236112f5", + "Rev": "d236112f57134c870530b511f69766d9805df835" } ] } diff --git a/README.md b/README.md index 4c6b5cd7..2927d42f 100644 --- a/README.md +++ b/README.md @@ -91,13 +91,30 @@ syncing many files. Currently only one file is uploaded at the time, the speed can be improved in the future by uploading several files concurrently. To learn more see usage and the examples below. +## Client Credentials +By default, gdrive uses OAuth2 client credentials (client id and secret) in +order to allow it to use Google's Drive APIs on behalf of the user. The credentials +are set inside gdrive by default. Unfortunately this means that the rate limit of +API usage is shared amongst all gdrive users globally. It has become common for +the rate limit to be exceeded, causing gdrive to stop stop working. 
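For reference, the compiled-in pair lives in `handlers_drive.go` as the `ClientId` and `ClientSecret` constants further down in this diff, and it gets wrapped in an `oauth2.Config` before any Drive call is made. Below is a minimal sketch of that wiring; the client id, secret and drive scope are taken from this patch, while the endpoint and redirect values are assumptions rather than copies of gdrive's code:

```go
package main

import (
	"fmt"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

// Compiled-in defaults, as declared in handlers_drive.go in this diff.
const builtInClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com"
const builtInClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO"

// builtInConfig mirrors what AssembleClientCredentials (formerly getConfig)
// in auth/oauth.go produces from the defaults above.
func builtInConfig() *oauth2.Config {
	return &oauth2.Config{
		ClientID:     builtInClientId,
		ClientSecret: builtInClientSecret,
		Scopes:       []string{"https://www.googleapis.com/auth/drive"},
		Endpoint:     google.Endpoint,             // assumed; not copied from gdrive
		RedirectURL:  "urn:ietf:wg:oauth:2.0:oob", // assumed out-of-band flow
	}
}

func main() {
	// Print the consent URL a user would be sent to during authentication.
	fmt.Println(builtInConfig().AuthCodeURL("state", oauth2.AccessTypeOffline))
}
```

Because every unmodified install presents the same client id, all of them draw from a single per-project quota, which is why the limit is hit so often.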
+ +To get around this, gdrive now supports loading external OAuth2 client credentials. + +Generate your new credentials in the Google API Console, name the credentials +file `client_id.json` and move it to the gdrive config directory. +Then delete `token_v2.json` to reauthenticate using the new credentials. + +One side effect to note is that existing sync directories will not work after the +credentials change because sync sets credential specific properties on the files +(appProperty). + ### Service Account -For server to server communication, where user interaction is not a viable option, +For server to server communication, where user interaction is not a viable option, is it possible to use a service account, as described in this [Google document](https://developers.google.com/identity/protocols/OAuth2ServiceAccount). If you want to use a service account, instead of being interactively prompted for -authentication, you need to use the `--service-account ` +authentication, you need to use the `--service-account ` global option, where `serviceAccountCredentials` is a file in JSON format obtained -through the Google API Console, and its location is relative to the config dir. +through the Google API Console, and its location is relative to the config dir. #### .gdriveignore Placing a .gdriveignore in the root of your sync directory can be used to @@ -174,7 +191,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: -f, --force Overwrite existing file -r, --recursive Download directory recursively, documents will be skipped @@ -194,7 +211,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: -f, --force Overwrite existing file -r, --recursive Download directories recursively, documents will be skipped @@ -211,7 +228,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: -r, --recursive Upload directory recursively -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents @@ -234,7 +251,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents --chunksize Set chunk size in bytes, default: 8388608 @@ -254,7 +271,7 @@ global: 
--refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents --name Filename @@ -274,7 +291,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: --bytes Show size in bytes ``` @@ -288,7 +305,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: -p, --parent Parent id of created directory, can be specified multiple times to give many parents --description Directory description @@ -303,7 +320,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: --role Share role: owner/writer/commenter/reader, default: reader --type Share type: user/group/domain/anyone, default: anyone @@ -343,7 +360,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: -r, --recursive Delete directory and all it's content ``` @@ -357,7 +374,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: --no-header Dont print the header ``` @@ -371,7 +388,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: --order Sort order. 
See https://godoc.org/google.golang.org/api/drive/v3#FilesListCall.OrderBy --path-width Width of path column, default: 60, minimum: 9, use 0 for full width @@ -388,7 +405,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: --keep-remote Keep remote file when a conflict is encountered --keep-local Keep local file when a conflict is encountered @@ -408,7 +425,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: --keep-remote Keep remote file when a conflict is encountered --keep-local Keep local file when a conflict is encountered @@ -429,7 +446,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: -m, --max Max changes to list, default: 100 --since Page token to start listing changes from @@ -447,7 +464,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: --name-width Width of name column, default: 40, minimum: 9, use 0 for full width --no-header Dont print the header @@ -463,7 +480,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: -f, --force Overwrite existing file --no-progress Hide progress @@ -492,7 +509,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: -p, --parent Parent id, used to upload file to a specific directory, can be specified multiple times to give many parents --no-progress Hide progress @@ -507,7 +524,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is 
relative to config dir) - + options: -f, --force Overwrite existing file --mime Mime type of exported file @@ -523,7 +540,7 @@ global: --refresh-token Oauth refresh token used to get access token (for advanced users) --access-token Oauth access token, only recommended for short-lived requests because of short lifetime (for advanced users) --service-account Oauth service account filename, used for server to server communication without user interaction (file is relative to config dir) - + options: --bytes Show size in bytes ``` diff --git a/auth/file_source.go b/auth/file_source.go index 9223d1dc..9c6eff9a 100644 --- a/auth/file_source.go +++ b/auth/file_source.go @@ -3,6 +3,7 @@ package auth import ( "encoding/json" "golang.org/x/oauth2" + "golang.org/x/oauth2/google" "io/ioutil" "os" ) @@ -43,6 +44,15 @@ func ReadFile(path string) ([]byte, bool, error) { return content, true, nil } +func ReadClientCredentials(path string) (*oauth2.Config, bool, error) { + + content, exists, err := ReadFile(path) + if err != nil || exists == false { + return nil, exists, err + } + conf, err := google.ConfigFromJSON(content) + return conf, exists, err +} func ReadToken(path string) (*oauth2.Token, bool, error) { diff --git a/auth/oauth.go b/auth/oauth.go index bc567385..5e09e08b 100644 --- a/auth/oauth.go +++ b/auth/oauth.go @@ -10,8 +10,7 @@ import ( type authCodeFn func(string) func() string -func NewFileSourceClient(clientId, clientSecret, tokenFile string, authFn authCodeFn) (*http.Client, error) { - conf := getConfig(clientId, clientSecret) +func NewFileSourceClient(conf *oauth2.Config, tokenFile string, authFn authCodeFn) (*http.Client, error) { // Read cached token token, exists, err := ReadToken(tokenFile) @@ -36,8 +35,7 @@ func NewFileSourceClient(clientId, clientSecret, tokenFile string, authFn authCo ), nil } -func NewRefreshTokenClient(clientId, clientSecret, refreshToken string) *http.Client { - conf := getConfig(clientId, clientSecret) +func NewRefreshTokenClient(conf *oauth2.Config, refreshToken string) *http.Client { token := &oauth2.Token{ TokenType: "Bearer", @@ -51,8 +49,7 @@ func NewRefreshTokenClient(clientId, clientSecret, refreshToken string) *http.Cl ) } -func NewAccessTokenClient(clientId, clientSecret, accessToken string) *http.Client { - conf := getConfig(clientId, clientSecret) +func NewAccessTokenClient(conf *oauth2.Config, accessToken string) *http.Client { token := &oauth2.Token{ TokenType: "Bearer", @@ -82,7 +79,7 @@ func NewServiceAccountClient(serviceAccountFile string) (*http.Client, error) { return conf.Client(oauth2.NoContext), nil } -func getConfig(clientId, clientSecret string) *oauth2.Config { +func AssembleClientCredentials(clientId, clientSecret string) *oauth2.Config { return &oauth2.Config{ ClientID: clientId, ClientSecret: clientSecret, diff --git a/handlers_drive.go b/handlers_drive.go index 7bda872f..c3f52a49 100644 --- a/handlers_drive.go +++ b/handlers_drive.go @@ -17,8 +17,11 @@ import ( const ClientId = "367116221053-7n0vf5akeru7on6o2fjinrecpdoe99eg.apps.googleusercontent.com" const ClientSecret = "1qsNodXNaWq1mQuBjUjmvhoO" const TokenFilename = "token_v2.json" +const ClientCredentialsFilename = "client_id.json" const DefaultCacheFileName = "file_cache.json" +var usingClientCredentialsFile = false + func listHandler(ctx cli.Context) { args := ctx.Args() err := newDrive(args).List(drive.ListFilesArgs{ @@ -345,16 +348,31 @@ func getOauthClient(args cli.Arguments) (*http.Client, error) { ExitF("Access token not needed when refresh token is provided") } + 
configDir := getConfigDir(args) + + clientCredentialsPath := ConfigFilePath(configDir, ClientCredentialsFilename) + clientCredentials, exists, err := auth.ReadClientCredentials(clientCredentialsPath) + if err != nil { + ExitF("Failed to read client credentials file: %s", err) + } else if !exists { + clientCredentials = auth.AssembleClientCredentials(ClientId, ClientSecret) + } else { + usingClientCredentialsFile = true + // Make sure the google drive scope is present + if len(clientCredentials.Scopes) == 0 { + clientCredentials.Scopes = append(clientCredentials.Scopes, "https://www.googleapis.com/auth/drive") + } + } + + if args.String("refreshToken") != "" { - return auth.NewRefreshTokenClient(ClientId, ClientSecret, args.String("refreshToken")), nil + return auth.NewRefreshTokenClient(clientCredentials, args.String("refreshToken")), nil } if args.String("accessToken") != "" { - return auth.NewAccessTokenClient(ClientId, ClientSecret, args.String("accessToken")), nil + return auth.NewAccessTokenClient(clientCredentials, args.String("accessToken")), nil } - configDir := getConfigDir(args) - if args.String("serviceAccount") != "" { serviceAccountPath := ConfigFilePath(configDir, args.String("serviceAccount")) serviceAccountClient, err := auth.NewServiceAccountClient(serviceAccountPath) @@ -365,7 +383,7 @@ func getOauthClient(args cli.Arguments) (*http.Client, error) { } tokenPath := ConfigFilePath(configDir, TokenFilename) - return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt) + return auth.NewFileSourceClient(clientCredentials, tokenPath, authCodePrompt) } func getConfigDir(args cli.Arguments) string { @@ -392,6 +410,12 @@ func newDrive(args cli.Arguments) *drive.Drive { func authCodePrompt(url string) func() string { return func() string { + if (usingClientCredentialsFile == true) { + fmt.Println("Client credentials loaded from file\n") + } else { + fmt.Println("Client credentials file not found. Using built-in defaults\n") + } + fmt.Println("Authentication needed") fmt.Println("Go to the following url in your browser:") fmt.Printf("%s\n\n", url) diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS new file mode 100644 index 00000000..c364af1d --- /dev/null +++ b/vendor/cloud.google.com/go/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of cloud authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Filippo Valsorda +Google Inc. +Ingo Oeser +Palm Stone Games, Inc. +Paweł Knap +Péter Szilágyi +Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS new file mode 100644 index 00000000..3b3cbed9 --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -0,0 +1,40 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. 
+ +Alexis Hunt +Andreas Litt +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Sansome +David Symonds +Filippo Valsorda +Glenn Lewis +Ingo Oeser +James Hall +Johan Euphrosine +Jonathan Amsterdam +Kunpei Sakai +Luna Duclos +Magnus Hiie +Mario Castro +Michael McGreevy +Omar Jarjur +Paweł Knap +Péter Szilágyi +Sarah Adams +Thanatat Tamtan +Toby Burress +Tuo Shan +Tyler Treat diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 00000000..20b7e5be --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,513 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. 
+ metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }} + subscribeClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }} +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +func (c *cachedValue) get(cl *Client) (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = cl.getTrimmed(c.k) + } else { + v, err = cl.Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/googleapis/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := defaultClient.hc.Do(req.WithContext(ctx)) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. 
Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no +// ResponseHeaderTimeout). +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + return subscribeClient.Subscribe(suffix, fn) +} + +// Get calls Client.Get on the default client. +func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. +func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. +func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +// A Client provides metadata. 
+type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. All HTTP requests +// will use the given http.Client instead of the default client. +func NewClient(c *http.Client) *Client { + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP + } + u := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", u, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := c.hc.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if res.StatusCode != 200 { + return "", "", &Error{Code: res.StatusCode, Message: string(all)} + } + return string(all), res.Header.Get("Etag"), nil +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) Get(suffix string) (string, error) { + val, _, err := c.getETag(suffix) + return val, err +} + +func (c *Client) getTrimmed(suffix string) (s string, err error) { + s, err = c.Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// ProjectID returns the current instance's project ID string. +func (c *Client) ProjectID() (string, error) { return projID.get(c) } + +// NumericProjectID returns the current instance's numeric project ID. +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } + +// InternalIP returns the instance's primary internal IP address. +func (c *Client) InternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". 
+func (c *Client) Hostname() (string, error) { + return c.getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTags() ([]string, error) { + var s []string + j, err := c.Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceName returns the current VM's instance ID string. +func (c *Client) InstanceName() (string, error) { + host, err := c.Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. 
+func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := c.getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code. + Code int + // Message is the server response message. + Message string +} + +func (e *Error) Error() string { + return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) +} diff --git a/vendor/github.com/sabhiram/go-git-ignore/README.md b/vendor/github.com/sabhiram/go-git-ignore/README.md index fbbb3761..9440d6dd 100644 --- a/vendor/github.com/sabhiram/go-git-ignore/README.md +++ b/vendor/github.com/sabhiram/go-git-ignore/README.md @@ -1,17 +1,15 @@ -# go-git-ignore +# go-gitignore -[![Build Status](https://travis-ci.org/sabhiram/go-git-ignore.svg)](https://travis-ci.org/sabhiram/go-git-ignore) [![Coverage Status](https://coveralls.io/repos/sabhiram/go-git-ignore/badge.png?branch=master)](https://coveralls.io/r/sabhiram/go-git-ignore?branch=master) +[![Build Status](https://travis-ci.org/sabhiram/go-gitignore.svg)](https://travis-ci.org/sabhiram/go-gitignore) [![Coverage Status](https://coveralls.io/repos/github/sabhiram/go-gitignore/badge.svg?branch=master)](https://coveralls.io/github/sabhiram/go-gitignore?branch=master) A gitignore parser for `Go` ## Install ```shell -go get github.com/sabhiram/go-git-ignore +go get github.com/sabhiram/go-gitignore ``` ## Usage -```shell -TODO -``` +For a quick sample of how to use this library, check out the tests under `ignore_test.go`. 
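Since the upstream README now points at the tests instead of showing code, a short usage sketch may help. It sticks to `CompileIgnoreLines` and `MatchesPath`, whose signatures appear in `ignore.go` just below; the patterns themselves are made up. gdrive's `.gdriveignore` support is built on this package.

```go
package main

import (
	"fmt"

	ignore "github.com/sabhiram/go-git-ignore"
)

func main() {
	// Compile a couple of gitignore-style patterns into a matcher.
	gi, err := ignore.CompileIgnoreLines("*.tmp", "!keep.tmp")
	if err != nil {
		panic(err)
	}

	fmt.Println(gi.MatchesPath("output.tmp")) // expected true: caught by *.tmp
	fmt.Println(gi.MatchesPath("keep.tmp"))   // expected false: re-included by !keep.tmp
	fmt.Println(gi.MatchesPath("notes.txt"))  // expected false: no pattern matches
}
```

`CompileIgnoreFile` and the new `CompileIgnoreFileAndLines` work the same way, except the patterns are read from a file first.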
diff --git a/vendor/github.com/sabhiram/go-git-ignore/ignore.go b/vendor/github.com/sabhiram/go-git-ignore/ignore.go index e3241b2c..846b52cb 100644 --- a/vendor/github.com/sabhiram/go-git-ignore/ignore.go +++ b/vendor/github.com/sabhiram/go-git-ignore/ignore.go @@ -58,20 +58,16 @@ import ( "strings" ) -// An IgnoreParser is an interface which exposes two methods: -// MatchesPath() - Returns true if the path is targeted by the patterns compiled in the GitIgnore structure +//////////////////////////////////////////////////////////// + +// An IgnoreParser is an interface which exposes a single method: +// MatchesPath() - Returns true if the path is targeted by the patterns compiled +// in the GitIgnore structure type IgnoreParser interface { - IncludesPath(f string) bool - IgnoresPath(f string) bool MatchesPath(f string) bool } -// GitIgnore is a struct which contains a slice of regexp.Regexp -// patterns -type GitIgnore struct { - patterns []*regexp.Regexp // List of regexp patterns which this ignore file applies - negate []bool // List of booleans which determine if the pattern is negated -} +//////////////////////////////////////////////////////////// // This function pretty much attempts to mimic the parsing rules // listed above at the start of this file @@ -151,19 +147,32 @@ func getPatternFromLine(line string) (*regexp.Regexp, bool) { return pattern, negatePattern } +//////////////////////////////////////////////////////////// + +// ignorePattern encapsulates a pattern and if it is a negated pattern. +type ignorePattern struct { + pattern *regexp.Regexp + negate bool +} + +// GitIgnore wraps a list of ignore pattern. +type GitIgnore struct { + patterns []*ignorePattern +} + // Accepts a variadic set of strings, and returns a GitIgnore object which // converts and appends the lines in the input to regexp.Regexp patterns // held within the GitIgnore objects "patterns" field func CompileIgnoreLines(lines ...string) (*GitIgnore, error) { - g := new(GitIgnore) + gi := &GitIgnore{} for _, line := range lines { pattern, negatePattern := getPatternFromLine(line) if pattern != nil { - g.patterns = append(g.patterns, pattern) - g.negate = append(g.negate, negatePattern) + ip := &ignorePattern{pattern, negatePattern} + gi.patterns = append(gi.patterns, ip) } } - return g, nil + return gi, nil } // Accepts a ignore file as the input, parses the lines out of the file @@ -177,24 +186,38 @@ func CompileIgnoreFile(fpath string) (*GitIgnore, error) { return nil, error } -// MatchesPath is an interface function for the IgnoreParser interface. -// It returns true if the given GitIgnore structure would target a given -// path string "f" -func (g GitIgnore) MatchesPath(f string) bool { +// Accepts a ignore file as the input, parses the lines out of the file +// and invokes the CompileIgnoreLines method with additional lines +func CompileIgnoreFileAndLines(fpath string, lines ...string) (*GitIgnore, error) { + buffer, error := ioutil.ReadFile(fpath) + if error == nil { + s := strings.Split(string(buffer), "\n") + return CompileIgnoreLines(append(s, lines...)...) + } + return nil, error +} + +//////////////////////////////////////////////////////////// + +// MatchesPath returns true if the given GitIgnore structure would target +// a given path string `f`. +func (gi *GitIgnore) MatchesPath(f string) bool { // Replace OS-specific path separator. 
f = strings.Replace(f, string(os.PathSeparator), "/", -1) matchesPath := false - for idx, pattern := range g.patterns { - if pattern.MatchString(f) { + for _, ip := range gi.patterns { + if ip.pattern.MatchString(f) { // If this is a regular target (not negated with a gitignore exclude "!" etc) - if !g.negate[idx] { + if !ip.negate { matchesPath = true - // Negated pattern, and matchesPath is already set } else if matchesPath { + // Negated pattern, and matchesPath is already set matchesPath = false } } } return matchesPath } + +//////////////////////////////////////////////////////////// diff --git a/vendor/github.com/sabhiram/go-git-ignore/version_gen.go b/vendor/github.com/sabhiram/go-git-ignore/version_gen.go new file mode 100644 index 00000000..720a0575 --- /dev/null +++ b/vendor/github.com/sabhiram/go-git-ignore/version_gen.go @@ -0,0 +1,12 @@ +package ignore + +// WARNING: Auto generated version file. Do not edit this file by hand. +// WARNING: go get github.com/sabhiram/gover to manage this file. +// Version: 1.0.2 +const ( + Major = 1 + Minor = 0 + Patch = 2 + + Version = "1.0.2" +) diff --git a/vendor/github.com/soniakeys/bits/.travis.yml b/vendor/github.com/soniakeys/bits/.travis.yml new file mode 100644 index 00000000..2df1f242 --- /dev/null +++ b/vendor/github.com/soniakeys/bits/.travis.yml @@ -0,0 +1,9 @@ +sudo: false +language: go +go: master +before_script: + - go vet + - go get github.com/client9/misspell/cmd/misspell + - misspell -error * + - go get github.com/soniakeys/vetc + - vetc diff --git a/vendor/github.com/soniakeys/bits/bits.go b/vendor/github.com/soniakeys/bits/bits.go new file mode 100644 index 00000000..416b0d89 --- /dev/null +++ b/vendor/github.com/soniakeys/bits/bits.go @@ -0,0 +1,463 @@ +// Copyright 2017 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +// Bits implements methods on a bit array type. +// +// The Bits type holds a fixed size array of bits, numbered consecutively +// from zero. Some set-like operations are possible, but the API is more +// array-like or register-like. +package bits + +import ( + "fmt" + mb "math/bits" +) + +// Bits holds a fixed number of bits. +// +// Bit number 0 is stored in the LSB, or bit 0, of the word indexed at 0. +// +// When Num is not a multiple of 64, the last element of Bits will hold some +// bits beyond Num. These bits are undefined. They are not required to be +// zero but do not have any meaning. Bits methods are not required to leave +// them undisturbed. +type Bits struct { + Num int // number of bits + Bits []uint64 +} + +// New constructs a Bits value with the given number of bits. +// +// It panics if num is negative. +func New(num int) Bits { + if num < 0 { + panic("negative number of bits") + } + return Bits{num, make([]uint64, (num+63)>>6)} +} + +// NewGivens constructs a Bits value with the given bits nums set to 1. +// +// The number of bits will be just enough to hold the largest bit value +// listed. That is, the number of bits will be the max bit number plus one. +// +// It panics if any bit number is negative. +func NewGivens(nums ...int) Bits { + max := -1 + for _, p := range nums { + if p > max { + max = p + } + } + b := New(max + 1) + for _, p := range nums { + b.SetBit(p, 1) + } + return b +} + +// AllOnes returns true if all Num bits are 1. 
+func (b Bits) AllOnes() bool { + last := len(b.Bits) - 1 + for _, w := range b.Bits[:last] { + if w != ^uint64(0) { + return false + } + } + return ^b.Bits[last]<= b.Num { + panic("bit number out of range") + } + return int(b.Bits[n>>6] >> uint(n&63) & 1) +} + +// ClearAll sets all bits to 0. +func (b Bits) ClearAll() { + for i := range b.Bits { + b.Bits[i] = 0 + } +} + +// ClearBits sets the given bits to 0 in receiver b. +// +// Other bits of b are left unchanged. +// +// It panics if any bit number is out of range. +// That is, negative or >= the number of bits. +func (b Bits) ClearBits(nums ...int) { + for _, p := range nums { + b.SetBit(p, 0) + } +} + +// Equal returns true if all Num bits of a and b are equal. +// +// It panics if a and b have different Num. +func (a Bits) Equal(b Bits) bool { + if a.Num != b.Num { + panic("receiver and argument have different number of bits") + } + if a.Num == 0 { + return true + } + last := len(a.Bits) - 1 + for i, w := range a.Bits[:last] { + if w != b.Bits[i] { + return false + } + } + return (a.Bits[last]^b.Bits[last])<= b.Num { + return true + } + if !v(x<<6 | i) { + return false + } + w >>= uint(t + 1) + if w == 0 { + break + } + t = mb.TrailingZeros64(w) + i += 1 + t + } + } + } + return true +} + +// IterateZeros calls visitor function v for each bit with a value of 0, +// in order from lowest bit to highest bit. +// +// Iteration continues to the highest bit as long as v returns true. +// It stops if v returns false. +// +// IterateZeros returns true normally. It returns false if v returns false. +// +// IterateZeros may not be sensitive to changes if bits are changed during +// iteration, by the vistor function for example. +// See ZeroFrom for an iteration method sensitive to changes during iteration. +func (b Bits) IterateZeros(v func(int) bool) bool { + for x, w := range b.Bits { + w = ^w + if w != 0 { + t := mb.TrailingZeros64(w) + i := t // index in w of next 1 bit + for { + n := x<<6 | i + if n >= b.Num { + return true + } + if !v(x<<6 | i) { + return false + } + w >>= uint(t + 1) + if w == 0 { + break + } + t = mb.TrailingZeros64(w) + i += 1 + t + } + } + } + return true +} + +// Not sets receiver z to the complement of b. +func (z *Bits) Not(b Bits) { + if z.Num != b.Num { + *z = New(b.Num) + } + for i, w := range b.Bits { + z.Bits[i] = ^w + } +} + +// OneFrom returns the number of the first 1 bit at or after (from) bit num. +// +// It returns -1 if there is no one bit at or after num. +// +// This provides one way to iterate over one bits. +// To iterate over the one bits, call OneFrom with n = 0 to get the the first +// one bit, then call with the result + 1 to get successive one bits. +// Unlike the Iterate method, this technique is stateless and so allows +// bits to be changed between successive calls. +// +// There is no panic for calling OneFrom with an argument >= b.Num. +// In this case OneFrom simply returns -1. +// +// See also Iterate. +func (b Bits) OneFrom(num int) int { + if num >= b.Num { + return -1 + } + x := num >> 6 + // test for 1 in this word at or after n + if wx := b.Bits[x] >> uint(num&63); wx != 0 { + num += mb.TrailingZeros64(wx) + if num >= b.Num { + return -1 + } + return num + } + x++ + for y, wy := range b.Bits[x:] { + if wy != 0 { + num = (x+y)<<6 | mb.TrailingZeros64(wy) + if num >= b.Num { + return -1 + } + return num + } + } + return -1 +} + +// Or sets z = x | y. +// +// It panics if x and y do not have the same Num. 
+func (z *Bits) Or(x, y Bits) { + if x.Num != y.Num { + panic("arguments have different number of bits") + } + if z.Num != x.Num { + *z = New(x.Num) + } + for i, w := range y.Bits { + z.Bits[i] = x.Bits[i] | w + } +} + +// OnesCount returns the number of 1 bits. +func (b Bits) OnesCount() (c int) { + if b.Num == 0 { + return 0 + } + last := len(b.Bits) - 1 + for _, w := range b.Bits[:last] { + c += mb.OnesCount64(w) + } + c += mb.OnesCount64(b.Bits[last] << uint(len(b.Bits)*64-b.Num)) + return +} + +// Set sets the bits of z to the bits of x. +func (z *Bits) Set(b Bits) { + if z.Num != b.Num { + *z = New(b.Num) + } + copy(z.Bits, b.Bits) +} + +// SetAll sets z to have all 1 bits. +func (b Bits) SetAll() { + for i := range b.Bits { + b.Bits[i] = ^uint64(0) + } +} + +// SetBit sets the n'th bit to x, where x is a 0 or 1. +// +// It panics if n is out of range +func (b Bits) SetBit(n, x int) { + if n < 0 || n >= b.Num { + panic("bit number out of range") + } + if x == 0 { + b.Bits[n>>6] &^= 1 << uint(n&63) + } else { + b.Bits[n>>6] |= 1 << uint(n&63) + } +} + +// SetBits sets the given bits to 1 in receiver b. +// +// Other bits of b are left unchanged. +// +// It panics if any bit number is out of range, negative or >= the number +// of bits. +func (b Bits) SetBits(nums ...int) { + for _, p := range nums { + b.SetBit(p, 1) + } +} + +// Single returns true if b has exactly one 1 bit. +func (b Bits) Single() bool { + // like OnesCount, but stop as soon as two are found + if b.Num == 0 { + return false + } + c := 0 + last := len(b.Bits) - 1 + for _, w := range b.Bits[:last] { + c += mb.OnesCount64(w) + if c > 1 { + return false + } + } + c += mb.OnesCount64(b.Bits[last] << uint(len(b.Bits)*64-b.Num)) + return c == 1 +} + +// Slice returns a slice with the bit numbers of each 1 bit. +func (b Bits) Slice() (s []int) { + for x, w := range b.Bits { + if w == 0 { + continue + } + t := mb.TrailingZeros64(w) + i := t // index in w of next 1 bit + for { + n := x<<6 | i + if n >= b.Num { + break + } + s = append(s, n) + w >>= uint(t + 1) + if w == 0 { + break + } + t = mb.TrailingZeros64(w) + i += 1 + t + } + } + return +} + +// String returns a readable representation. +// +// The returned string is big-endian, with the highest number bit first. +// +// If Num is 0, an empty string is returned. +func (b Bits) String() (s string) { + if b.Num == 0 { + return "" + } + last := len(b.Bits) - 1 + for _, w := range b.Bits[:last] { + s = fmt.Sprintf("%064b", w) + s + } + lb := b.Num - 64*last + return fmt.Sprintf("%0*b", lb, + b.Bits[last]&(^uint64(0)>>uint(64-lb))) + s +} + +// Xor sets z = x ^ y. +func (z *Bits) Xor(x, y Bits) { + if x.Num != y.Num { + panic("arguments have different number of bits") + } + if z.Num != x.Num { + *z = New(x.Num) + } + for i, w := range y.Bits { + z.Bits[i] = x.Bits[i] ^ w + } +} + +// ZeroFrom returns the number of the first 0 bit at or after (from) bit num. +// +// It returns -1 if there is no zero bit at or after num. +// +// This provides one way to iterate over zero bits. +// To iterate over the zero bits, call ZeroFrom with n = 0 to get the the first +// zero bit, then call with the result + 1 to get successive zero bits. +// Unlike the IterateZeros method, this technique is stateless and so allows +// bits to be changed between successive calls. +// +// There is no panic for calling ZeroFrom with an argument >= b.Num. +// In this case ZeroFrom simply returns -1. +// +// See also IterateZeros. 
+func (b Bits) ZeroFrom(num int) int { + // code much like OneFrom except words are negated before testing + if num >= b.Num { + return -1 + } + x := num >> 6 + // negate word to test for 0 at or after n + if wx := ^b.Bits[x] >> uint(num&63); wx != 0 { + num += mb.TrailingZeros64(wx) + if num >= b.Num { + return -1 + } + return num + } + x++ + for y, wy := range b.Bits[x:] { + wy = ^wy + if wy != 0 { + num = (x+y)<<6 | mb.TrailingZeros64(wy) + if num >= b.Num { + return -1 + } + return num + } + } + return -1 +} diff --git a/vendor/github.com/soniakeys/bits/go.mod b/vendor/github.com/soniakeys/bits/go.mod new file mode 100644 index 00000000..a2464ef9 --- /dev/null +++ b/vendor/github.com/soniakeys/bits/go.mod @@ -0,0 +1 @@ +module "github.com/soniakeys/bits" diff --git a/vendor/github.com/soniakeys/bits/readme.adoc b/vendor/github.com/soniakeys/bits/readme.adoc new file mode 100644 index 00000000..d39b1aec --- /dev/null +++ b/vendor/github.com/soniakeys/bits/readme.adoc @@ -0,0 +1,38 @@ += Bits + +Bits provides methods on a bit array type. + +The Bits type holds a fixed size array of bits, numbered consecutively +from zero. Some set-like operations are possible, but the API is more +array-like or register-like. + +image:https://godoc.org/github.com/soniakeys/bits?status.svg[link=https://godoc.org/github.com/soniakeys/bits] image:https://travis-ci.org/soniakeys/bits.svg[link=https://travis-ci.org/soniakeys/bits] + +== Motivation and history + +This package evolved from needs of my library of +https://github.com/soniakeys/graph[graph algorithms]. For graph algorithms +a common need is to store a single bit of information per node in a way that +is both fast and memory efficient. I began by using `big.Int` from the standard +library, then wrapped big.Int in a type. From time to time I considered +other publicly available bit array or bit set packages, such as Will +Fitzgerald's popular https://github.com/willf/bitset[bitset], but there were +always little reasons I preferred my own type and methods. My type that +wrapped `big.Int` met my needs until some simple benchmarks indicated it +might be causing performance problems. Some further experiments supported +this hypothesis so I ran further tests with a prototype bit array written +from scratch. Then satisfied that my custom bit array was solving the graph +performance problems, I decided to move it to a separate package with the +idea it might have more general utility. 
For the initial version of this +package I did the following: + +- implemented a few tests to demonstrate fundamental correctness +- brought over most methods of my type that wrapped big.Int +- changed the index type from the graph-specific node index to a general `int` +- replaced some custom bit-twiddling with use of the new `math/bits` package + in the standard library +- renamed a few methods for clarity +- added a few methods for symmetry +- added a few new methods I had seen a need for in my graph library +- added doc, examples, tests, and more tests for 100% coverage +- added this readme diff --git a/vendor/github.com/soniakeys/graph/.gitignore b/vendor/github.com/soniakeys/graph/.gitignore index 3be61584..a93b25f4 100644 --- a/vendor/github.com/soniakeys/graph/.gitignore +++ b/vendor/github.com/soniakeys/graph/.gitignore @@ -1,2 +1,2 @@ *.dot - +anecdote/anecdote diff --git a/vendor/github.com/soniakeys/graph/.travis.yml b/vendor/github.com/soniakeys/graph/.travis.yml index bcc4f9fe..79332b26 100644 --- a/vendor/github.com/soniakeys/graph/.travis.yml +++ b/vendor/github.com/soniakeys/graph/.travis.yml @@ -1,8 +1,11 @@ sudo: false language: go -# update travis.sh when changing version number here go: - - 1.2.1 - - 1.6 -install: go get -t ./... -script: ./travis.sh + - "1.10.x" + - master +before_script: + - go tool vet -composites=false -printf=false -shift=false . + - go get github.com/client9/misspell/cmd/misspell + - go get github.com/soniakeys/vetc + - misspell -error * */* */*/* + - vetc diff --git a/vendor/github.com/soniakeys/graph/adj.go b/vendor/github.com/soniakeys/graph/adj.go index 165f365b..97ce6dc8 100644 --- a/vendor/github.com/soniakeys/graph/adj.go +++ b/vendor/github.com/soniakeys/graph/adj.go @@ -11,33 +11,32 @@ package graph // be left near their use. import ( - "math" "sort" + + "github.com/soniakeys/bits" ) -// HasParallelSort identifies if a graph contains parallel arcs, multiple arcs +// AnyParallel identifies if a graph contains parallel arcs, multiple arcs // that lead from a node to the same node. // // If the graph has parallel arcs, the results fr and to represent an example -// where there are parallel arcs from node fr to node to. +// where there are parallel arcs from node `fr` to node `to`. // // If there are no parallel arcs, the method returns false -1 -1. // // Multiple loops on a node count as parallel arcs. // -// "Sort" in the method name indicates that sorting is used to detect parallel -// arcs. Compared to method HasParallelMap, this may give better performance -// for small or sparse graphs but will have asymtotically worse performance for -// large dense graphs. -func (g AdjacencyList) HasParallelSort() (has bool, fr, to NI) { - var t NodeList +// See also alt.AnyParallelMap, which can perform better for some large +// or dense graphs. +func (g AdjacencyList) AnyParallel() (has bool, fr, to NI) { + var t []NI for n, to := range g { if len(to) == 0 { continue } // different code in the labeled version, so no code gen. t = append(t[:0], to...) - sort.Sort(t) + sort.Slice(t, func(i, j int) bool { return t[i] < t[j] }) t0 := t[0] for _, to := range t[1:] { if to == t0 { @@ -49,6 +48,33 @@ func (g AdjacencyList) HasParallelSort() (has bool, fr, to NI) { return false, -1, -1 } +// Complement returns the arc-complement of a simple graph. +// +// The result will have an arc for every pair of distinct nodes where there +// is not an arc in g. The complement is valid for both directed and +// undirected graphs. 
If g is undirected, the complement will be undirected. +// The result will always be a simple graph, having no loops or parallel arcs. +func (g AdjacencyList) Complement() AdjacencyList { + c := make(AdjacencyList, len(g)) + b := bits.New(len(g)) + for n, to := range g { + b.ClearAll() + for _, to := range to { + b.SetBit(int(to), 1) + } + b.SetBit(n, 1) + ct := make([]NI, len(g)-b.OnesCount()) + i := 0 + b.IterateZeros(func(to int) bool { + ct[i] = NI(to) + i++ + return true + }) + c[n] = ct + } + return c +} + // IsUndirected returns true if g represents an undirected graph. // // Returns true when all non-loop arcs are paired in reciprocal pairs. @@ -84,13 +110,25 @@ func (g AdjacencyList) IsUndirected() (u bool, from, to NI) { return true, -1, -1 } -// Edgelist constructs the edge list rerpresentation of a graph. +// SortArcLists sorts the arc lists of each node of receiver g. // -// An edge is returned for each arc of the graph. For undirected graphs -// this includes reciprocal edges. +// Nodes are not relabeled and the graph remains equivalent. +func (g AdjacencyList) SortArcLists() { + for _, to := range g { + sort.Slice(to, func(i, j int) bool { return to[i] < to[j] }) + } +} + +// ------- Labeled methods below ------- + +// ArcsAsEdges constructs an edge list with an edge for each arc, including +// reciprocals. +// +// This is a simple way to construct an edge list for algorithms that allow +// the duplication represented by the reciprocal arcs. (e.g. Kruskal) // -// See also WeightedEdgeList method. -func (g LabeledAdjacencyList) EdgeList() (el []LabeledEdge) { +// See also LabeledUndirected.Edges for the edge list without this duplication. +func (g LabeledAdjacencyList) ArcsAsEdges() (el []LabeledEdge) { for fr, to := range g { for _, to := range to { el = append(el, LabeledEdge{Edge{NI(fr), to.To}, to.Label}) @@ -99,60 +137,34 @@ func (g LabeledAdjacencyList) EdgeList() (el []LabeledEdge) { return } -// FloydWarshall finds all pairs shortest distances for a simple weighted -// graph without negative cycles. +// DistanceMatrix constructs a distance matrix corresponding to the arcs +// of graph g and weight function w. // -// In result array d, d[i][j] will be the shortest distance from node i -// to node j. Any diagonal element < 0 indicates a negative cycle exists. +// An arc from f to t with WeightFunc return w is represented by d[f][t] == w. +// In case of parallel arcs, the lowest weight is stored. The distance from +// any node to itself d[n][n] is 0, unless the node has a loop with a negative +// weight. If g has no arc from f to distinct t, +Inf is stored for d[f][t]. // -// If g is an undirected graph with no negative edge weights, the result -// array will be a distance matrix, for example as used by package -// github.com/soniakeys/cluster. -func (g LabeledAdjacencyList) FloydWarshall(w WeightFunc) (d [][]float64) { - d = newFWd(len(g)) +// The returned DistanceMatrix is suitable for DistanceMatrix.FloydWarshall. +func (g LabeledAdjacencyList) DistanceMatrix(w WeightFunc) (d DistanceMatrix) { + d = newDM(len(g)) for fr, to := range g { for _, to := range to { - d[fr][to.To] = w(to.Label) - } - } - solveFW(d) - return -} - -// little helper function, makes a blank matrix for FloydWarshall. -func newFWd(n int) [][]float64 { - d := make([][]float64, n) - for i := range d { - di := make([]float64, n) - for j := range di { - if j != i { - di[j] = math.Inf(1) - } - } - d[i] = di - } - return d -} - -// Floyd Warshall solver, once the matrix d is initialized by arc weights. 
-func solveFW(d [][]float64) { - for k, dk := range d { - for _, di := range d { - dik := di[k] - for j := range d { - if d2 := dik + dk[j]; d2 < di[j] { - di[j] = d2 - } + // < to pick min of parallel arcs (also nicely ignores NaN) + if wt := w(to.Label); wt < d[fr][to.To] { + d[fr][to.To] = wt } } } + return } -// HasArcLabel returns true if g has any arc from node fr to node to -// with label l. +// HasArcLabel returns true if g has any arc from node `fr` to node `to` +// with label `l`. // -// Also returned is the index within the slice of arcs from node fr. -// If no arc from fr to to is present, HasArcLabel returns false, -1. +// Also returned is the index within the slice of arcs from node `fr`. +// If no arc from `fr` to `to` with label `l` is present, HasArcLabel returns +// false, -1. func (g LabeledAdjacencyList) HasArcLabel(fr, to NI, l LI) (bool, int) { t := Half{to, l} for x, h := range g[fr] { @@ -163,22 +175,20 @@ func (g LabeledAdjacencyList) HasArcLabel(fr, to NI, l LI) (bool, int) { return false, -1 } -// HasParallelSort identifies if a graph contains parallel arcs, multiple arcs +// AnyParallel identifies if a graph contains parallel arcs, multiple arcs // that lead from a node to the same node. // // If the graph has parallel arcs, the results fr and to represent an example -// where there are parallel arcs from node fr to node to. +// where there are parallel arcs from node `fr` to node `to`. // // If there are no parallel arcs, the method returns -1 -1. // // Multiple loops on a node count as parallel arcs. // -// "Sort" in the method name indicates that sorting is used to detect parallel -// arcs. Compared to method HasParallelMap, this may give better performance -// for small or sparse graphs but will have asymtotically worse performance for -// large dense graphs. -func (g LabeledAdjacencyList) HasParallelSort() (has bool, fr, to NI) { - var t NodeList +// See also alt.AnyParallelMap, which can perform better for some large +// or dense graphs. +func (g LabeledAdjacencyList) AnyParallel() (has bool, fr, to NI) { + var t []NI for n, to := range g { if len(to) == 0 { continue @@ -188,7 +198,7 @@ func (g LabeledAdjacencyList) HasParallelSort() (has bool, fr, to NI) { for _, to := range to { t = append(t, to.To) } - sort.Sort(t) + sort.Slice(t, func(i, j int) bool { return t[i] < t[j] }) t0 := t[0] for _, to := range t[1:] { if to == t0 { @@ -200,6 +210,42 @@ func (g LabeledAdjacencyList) HasParallelSort() (has bool, fr, to NI) { return false, -1, -1 } +// AnyParallelLabel identifies if a graph contains parallel arcs with the same +// label. +// +// If the graph has parallel arcs with the same label, the results fr and to +// represent an example where there are parallel arcs from node `fr` +// to node `to`. +// +// If there are no parallel arcs, the method returns false -1 Half{}. +// +// Multiple loops on a node count as parallel arcs. +func (g LabeledAdjacencyList) AnyParallelLabel() (has bool, fr NI, to Half) { + var t []Half + for n, to := range g { + if len(to) == 0 { + continue + } + // slightly different code needed here compared to AdjacencyList + t = t[:0] + for _, to := range to { + t = append(t, to) + } + sort.Slice(t, func(i, j int) bool { + return t[i].To < t[j].To || + t[i].To == t[j].To && t[i].Label < t[j].Label + }) + t0 := t[0] + for _, to := range t[1:] { + if to == t0 { + return true, NI(n), t0 + } + t0 = to + } + } + return false, -1, Half{} +} + // IsUndirected returns true if g represents an undirected graph. 
// // Returns true when all non-loop arcs are paired in reciprocal pairs with @@ -209,6 +255,7 @@ func (g LabeledAdjacencyList) HasParallelSort() (has bool, fr, to NI) { // an additional test not present in the otherwise equivalent unlabeled version // of IsUndirected. func (g LabeledAdjacencyList) IsUndirected() (u bool, from NI, to Half) { + // similar code in LabeledAdjacencyList.Edges unpaired := make(LabeledAdjacencyList, len(g)) for fr, to := range g { arc: // for each arc in g @@ -238,6 +285,17 @@ func (g LabeledAdjacencyList) IsUndirected() (u bool, from NI, to Half) { return true, -1, to } +// ArcLabels constructs the multiset of LIs present in g. +func (g LabeledAdjacencyList) ArcLabels() map[LI]int { + s := map[LI]int{} + for _, to := range g { + for _, to := range to { + s[to.Label]++ + } + } + return s +} + // NegativeArc returns true if the receiver graph contains a negative arc. func (g LabeledAdjacencyList) NegativeArc(w WeightFunc) bool { for _, nbs := range g { @@ -250,6 +308,23 @@ func (g LabeledAdjacencyList) NegativeArc(w WeightFunc) bool { return false } +// ParallelArcsLabel identifies all arcs from node `fr` to node `to` with label `l`. +// +// The returned slice contains an element for each arc from node `fr` to node `to` +// with label `l`. The element value is the index within the slice of arcs from node +// `fr`. +// +// See also the method HasArcLabel, which stops after finding a single arc. +func (g LabeledAdjacencyList) ParallelArcsLabel(fr, to NI, l LI) (p []int) { + t := Half{to, l} + for x, h := range g[fr] { + if h == t { + p = append(p, x) + } + } + return +} + // Unlabeled constructs the unlabeled graph corresponding to g. func (g LabeledAdjacencyList) Unlabeled() AdjacencyList { a := make(AdjacencyList, len(g)) @@ -263,16 +338,15 @@ func (g LabeledAdjacencyList) Unlabeled() AdjacencyList { return a } -// WeightedEdgeList constructs a WeightedEdgeList object from a -// LabeledAdjacencyList. +// WeightedArcsAsEdges constructs a WeightedEdgeList object from the receiver. // -// Internally it calls g.EdgeList() to obtain the Edges member. -// See LabeledAdjacencyList.EdgeList(). -func (g LabeledAdjacencyList) WeightedEdgeList(w WeightFunc) *WeightedEdgeList { +// Internally it calls g.ArcsAsEdges() to obtain the Edges member. +// See LabeledAdjacencyList.ArcsAsEdges(). +func (g LabeledAdjacencyList) WeightedArcsAsEdges(w WeightFunc) *WeightedEdgeList { return &WeightedEdgeList{ - Order: len(g), + Order: g.Order(), WeightFunc: w, - Edges: g.EdgeList(), + Edges: g.ArcsAsEdges(), } } diff --git a/vendor/github.com/soniakeys/graph/adj_RO.go b/vendor/github.com/soniakeys/graph/adj_RO.go index 1d37d14e..fcb79eb7 100644 --- a/vendor/github.com/soniakeys/graph/adj_RO.go +++ b/vendor/github.com/soniakeys/graph/adj_RO.go @@ -8,10 +8,22 @@ package graph // DO NOT EDIT adj_RO.go. The RO is for Read Only. import ( + "errors" + "fmt" "math/rand" - "time" + + "github.com/soniakeys/bits" ) +// ArcDensity returns density for an simple directed graph. +// +// See also ArcDensity function. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) ArcDensity() float64 { + return ArcDensity(len(g), g.ArcSize()) +} + // ArcSize returns the number of arcs in g. // // Note that for an undirected graph without loops, the number of undirected @@ -50,98 +62,34 @@ func (g AdjacencyList) BoundsOk() (ok bool, fr NI, to NI) { return true, -1, to } -// BreadthFirst traverses a directed or undirected graph in breadth first order. 
+// BreadthFirst traverses a directed or undirected graph in breadth +// first order. // -// Argument start is the start node for the traversal. If r is nil, nodes are -// visited in deterministic order. If a random number generator is supplied, -// nodes at each level are visited in random order. -// -// Argument f can be nil if you have no interest in the FromList path result. -// If FromList f is non-nil, the method populates f.Paths and sets f.MaxLen. -// It does not set f.Leaves. For convenience argument f can be a zero value -// FromList. If f.Paths is nil, the FromList is initialized first. If f.Paths -// is non-nil however, the FromList is used as is. The method uses a value of -// PathEnd.Len == 0 to indentify unvisited nodes. Existing non-zero values -// will limit the traversal. -// -// Traversal calls the visitor function v for each node starting with node -// start. If v returns true, traversal continues. If v returns false, the -// traversal terminates immediately. PathEnd Len and From values are updated -// before calling the visitor function. -// -// On return f.Paths and f.MaxLen are set but not f.Leaves. -// -// Returned is the number of nodes visited and ok = true if the traversal -// ran to completion or ok = false if it was terminated by the visitor -// function returning false. +// Traversal starts at node start and visits the nodes reachable from +// start. The function visit is called for each node visited. Nodes +// not reachable from start are not visited. // // There are equivalent labeled and unlabeled versions of this method. -func (g AdjacencyList) BreadthFirst(start NI, r *rand.Rand, f *FromList, v OkNodeVisitor) (visited int, ok bool) { - switch { - case f == nil: - e := NewFromList(len(g)) - f = &e - case f.Paths == nil: - *f = NewFromList(len(g)) - } - rp := f.Paths - // the frontier consists of nodes all at the same level - frontier := []NI{start} - level := 1 - // assign path when node is put on frontier, - rp[start] = PathEnd{Len: level, From: -1} - for { - f.MaxLen = level - level++ - var next []NI - if r == nil { - for _, n := range frontier { - visited++ - if !v(n) { // visit nodes as they come off frontier - return - } - for _, nb := range g[n] { - if rp[nb].Len == 0 { - next = append(next, nb) - rp[nb] = PathEnd{From: n, Len: level} - } - } - } - } else { // take nodes off frontier at random - for _, i := range r.Perm(len(frontier)) { - n := frontier[i] - // remainder of block same as above - visited++ - if !v(n) { - return - } - for _, nb := range g[n] { - if rp[nb].Len == 0 { - next = append(next, nb) - rp[nb] = PathEnd{From: n, Len: level} - } +// +// See also alt.BreadthFirst, a variant with more options, and +// alt.BreadthFirst2, a direction optimizing variant. +func (g AdjacencyList) BreadthFirst(start NI, visit func(NI)) { + v := bits.New(len(g)) + v.SetBit(int(start), 1) + visit(start) + var next []NI + for frontier := []NI{start}; len(frontier) > 0; { + for _, n := range frontier { + for _, nb := range g[n] { + if v.Bit(int(nb)) == 0 { + v.SetBit(int(nb), 1) + visit(nb) + next = append(next, nb) } } } - if len(next) == 0 { - break - } - frontier = next + frontier, next = next, frontier[:0] } - return visited, true -} - -// BreadthFirstPath finds a single path from start to end with a minimum -// number of nodes. -// -// Returned is the path as list of nodes. -// The result is nil if no path was found. -// -// There are equivalent labeled and unlabeled versions of this method. 
-func (g AdjacencyList) BreadthFirstPath(start, end NI) []NI { - var f FromList - g.BreadthFirst(start, nil, &f, func(n NI) bool { return n != end }) - return f.PathTo(end, nil) } // Copy makes a deep copy of g. @@ -157,99 +105,68 @@ func (g AdjacencyList) Copy() (c AdjacencyList, ma int) { return } -// DepthFirst traverses a graph depth first. -// -// As it traverses it calls visitor function v for each node. If v returns -// false at any point, the traversal is terminated immediately and DepthFirst -// returns false. Otherwise DepthFirst returns true. -// -// DepthFirst uses argument bm is used as a bitmap to guide the traversal. -// For a complete traversal, bm should be 0 initially. During the -// traversal, bits are set corresponding to each node visited. -// The bit is set before calling the visitor function. +// DepthFirst traverses a directed or undirected graph in depth +// first order. // -// Argument bm can be nil if you have no need for it. -// In this case a bitmap is created internally for one-time use. -// -// Alternatively v can be nil. In this case traversal still proceeds and -// updates the bitmap, which can be a useful result. -// DepthFirst always returns true in this case. -// -// It makes no sense for both bm and v to be nil. In this case DepthFirst -// returns false immediately. +// Traversal starts at node start and visits the nodes reachable from +// start. The function visit is called for each node visited. Nodes +// not reachable from start are not visited. // // There are equivalent labeled and unlabeled versions of this method. -func (g AdjacencyList) DepthFirst(start NI, bm *Bits, v OkNodeVisitor) (ok bool) { - if bm == nil { - if v == nil { - return false - } - bm = &Bits{} - } - var df func(n NI) bool - df = func(n NI) bool { - if bm.Bit(n) == 1 { - return true - } - bm.SetBit(n, 1) - if v != nil && !v(n) { - return false - } - for _, nb := range g[n] { - if !df(nb) { - return false +// +// See also alt.DepthFirst, a variant with more options. +func (g AdjacencyList) DepthFirst(start NI, visit func(NI)) { + v := bits.New(len(g)) + var f func(NI) + f = func(n NI) { + visit(n) + v.SetBit(int(n), 1) + for _, to := range g[n] { + if v.Bit(int(to)) == 0 { + f(to) } } - return true } - return df(start) + f(start) } -// DepthFirstRandom traverses a graph depth first, but following arcs in -// random order among arcs from a single node. -// -// If Rand r is nil, the method creates a new source and generator for -// one-time use. +// Equal compares two graphs for equality. // -// Usage is otherwise like the DepthFirst method. See DepthFirst. +// Note this is simple equality, not isomorphism. Graphs are equal if +// they have the same order and if corresponding nodes have the same +// arcs, although they do not need to be in the same order. // // There are equivalent labeled and unlabeled versions of this method. 
-func (g AdjacencyList) DepthFirstRandom(start NI, bm *Bits, v OkNodeVisitor, r *rand.Rand) (ok bool) { - if bm == nil { - if v == nil { - return false - } - bm = &Bits{} - } - if r == nil { - r = rand.New(rand.NewSource(time.Now().UnixNano())) +func (g AdjacencyList) Equal(h AdjacencyList) bool { + if len(g) != len(h) { + return false } - var df func(n NI) bool - df = func(n NI) bool { - if bm.Bit(n) == 1 { - return true + for n, gn := range g { + m := map[NI]int{} + for _, to := range gn { + m[to]++ } - bm.SetBit(n, 1) - if v != nil && !v(n) { - return false + for _, to := range h[n] { + m[to]-- } - to := g[n] - for _, i := range r.Perm(len(to)) { - if !df(to[i]) { + for _, c := range m { + if c != 0 { return false } } - return true } - return df(start) + return true } -// HasArc returns true if g has any arc from node fr to node to. +// HasArc returns true if g has any arc from node `fr` to node `to`. // -// Also returned is the index within the slice of arcs from node fr. -// If no arc from fr to to is present, HasArc returns false, -1. +// Also returned is the index within the slice of arcs from node `fr`. +// If no arc from `fr` to `to` is present, HasArc returns false, -1. // // There are equivalent labeled and unlabeled versions of this method. +// +// See also the method ParallelArcs, which finds all parallel arcs from +// `fr` to `to`. func (g AdjacencyList) HasArc(fr, to NI) (bool, int) { for x, h := range g[fr] { if h == to { @@ -259,16 +176,14 @@ func (g AdjacencyList) HasArc(fr, to NI) (bool, int) { return false, -1 } -// HasLoop identifies if a graph contains a loop, an arc that leads from a +// AnyLoop identifies if a graph contains a loop, an arc that leads from a // a node back to the same node. // -// If the graph has a loop, the result is an example node that has a loop. -// // If g contains a loop, the method returns true and an example of a node // with a loop. If there are no loops in g, the method returns false, -1. // // There are equivalent labeled and unlabeled versions of this method. -func (g AdjacencyList) HasLoop() (bool, NI) { +func (g AdjacencyList) AnyLoop() (bool, NI) { for fr, to := range g { for _, to := range to { if NI(fr) == to { @@ -279,37 +194,138 @@ func (g AdjacencyList) HasLoop() (bool, NI) { return false, -1 } -// HasParallelMap identifies if a graph contains parallel arcs, multiple arcs -// that lead from a node to the same node. -// -// If the graph has parallel arcs, the method returns true and -// results fr and to represent an example where there are parallel arcs -// from node fr to node to. -// -// If there are no parallel arcs, the method returns false, -1 -1. +// AddNode maps a node in a supergraph to a subgraph node. // -// Multiple loops on a node count as parallel arcs. +// Argument p must be an NI in supergraph s.Super. AddNode panics if +// p is not a valid node index of s.Super. // -// "Map" in the method name indicates that a Go map is used to detect parallel -// arcs. Compared to method HasParallelSort, this gives better asymtotic -// performance for large dense graphs but may have increased overhead for -// small or sparse graphs. +// AddNode is idempotent in that it does not add a new node to the subgraph if +// a subgraph node already exists mapped to supergraph node p. // -// There are equivalent labeled and unlabeled versions of this method. -func (g AdjacencyList) HasParallelMap() (has bool, fr, to NI) { - for n, to := range g { - if len(to) == 0 { - continue +// The mapped subgraph NI is returned. 
+func (s *Subgraph) AddNode(p NI) (b NI) { + if int(p) < 0 || int(p) >= s.Super.Order() { + panic(fmt.Sprint("AddNode: NI ", p, " not in supergraph")) + } + if b, ok := s.SubNI[p]; ok { + return b + } + a := s.AdjacencyList + b = NI(len(a)) + s.AdjacencyList = append(a, nil) + s.SuperNI = append(s.SuperNI, p) + s.SubNI[p] = b + return +} + +// AddArc adds an arc to a subgraph. +// +// Arguments fr, to must be NIs in supergraph s.Super. As with AddNode, +// AddArc panics if fr and to are not valid node indexes of s.Super. +// +// The arc specfied by fr, to must exist in s.Super. Further, the number of +// parallel arcs in the subgraph cannot exceed the number of corresponding +// parallel arcs in the supergraph. That is, each arc already added to the +// subgraph counts against the arcs available in the supergraph. If a matching +// arc is not available, AddArc returns an error. +// +// If a matching arc is available, subgraph nodes are added as needed, the +// subgraph arc is added, and the method returns nil. +func (s *Subgraph) AddArc(fr NI, to NI) error { + // verify supergraph NIs first, but without adding subgraph nodes just yet. + if int(fr) < 0 || int(fr) >= s.Super.Order() { + panic(fmt.Sprint("AddArc: NI ", fr, " not in supergraph")) + } + if int(to) < 0 || int(to) >= s.Super.Order() { + panic(fmt.Sprint("AddArc: NI ", to, " not in supergraph")) + } + // count existing matching arcs in subgraph + n := 0 + a := s.AdjacencyList + if bf, ok := s.SubNI[fr]; ok { + if bt, ok := s.SubNI[to]; ok { + // both NIs already exist in subgraph, need to count arcs + bTo := to + bTo = bt + for _, t := range a[bf] { + if t == bTo { + n++ + } + } } - m := map[NI]struct{}{} - for _, to := range to { - if _, ok := m[to]; ok { - return true, NI(n), to + } + // verify matching arcs are available in supergraph + for _, t := range (*s.Super)[fr] { + if t == to { + if n > 0 { + n-- // match existing arc + continue + } + // no more existing arcs need to be matched. nodes can finally + // be added as needed and then the arc can be added. + bf := s.AddNode(fr) + to = s.AddNode(to) + s.AdjacencyList[bf] = append(s.AdjacencyList[bf], to) + return nil // success + } + } + return errors.New("arc not available in supergraph") +} + +func (super AdjacencyList) induceArcs(sub map[NI]NI, sup []NI) AdjacencyList { + s := make(AdjacencyList, len(sup)) + for b, p := range sup { + var a []NI + for _, to := range super[p] { + if bt, ok := sub[to]; ok { + to = bt + a = append(a, to) } - m[to] = struct{}{} } + s[b] = a } - return false, -1, -1 + return s +} + +// InduceList constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. Argument l must be a list of +// NIs in receiver graph g. Receiver g becomes the supergraph of the induced +// subgraph. +// +// Duplicate NIs are allowed in list l. The duplicates are effectively removed +// and only a single corresponding node is created in the subgraph. Subgraph +// NIs are mapped in the order of list l, execpt for ignoring duplicates. +// NIs in l that are not in g will panic. +// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *AdjacencyList) InduceList(l []NI) *Subgraph { + sub, sup := mapList(l) + return &Subgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + + AdjacencyList: g.induceArcs(sub, sup)} +} + +// InduceBits constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. 
Argument t must be a bitmap +// representing NIs in receiver graph g. Receiver g becomes the supergraph +// of the induced subgraph. NIs in t that are not in g will panic. +// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *AdjacencyList) InduceBits(t bits.Bits) *Subgraph { + sub, sup := mapBits(t) + return &Subgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + + AdjacencyList: g.induceArcs(sub, sup)} } // IsSimple checks for loops and parallel arcs. @@ -320,14 +336,14 @@ func (g AdjacencyList) HasParallelMap() (has bool, fr, to NI) { // found, simple returns false and a node that represents a counterexample // to the graph being simple. // -// See also separate methods HasLoop and HasParallel. +// See also separate methods AnyLoop and AnyParallel. // // There are equivalent labeled and unlabeled versions of this method. func (g AdjacencyList) IsSimple() (ok bool, n NI) { - if lp, n := g.HasLoop(); lp { + if lp, n := g.AnyLoop(); lp { return false, n } - if pa, n, _ := g.HasParallelSort(); pa { + if pa, n, _ := g.AnyParallel(); pa { return false, n } return true, -1 @@ -338,50 +354,92 @@ func (g AdjacencyList) IsSimple() (ok bool, n NI) { // An isolated node is one with no arcs going to or from it. // // There are equivalent labeled and unlabeled versions of this method. -func (g AdjacencyList) IsolatedNodes() (i Bits) { - i.SetAll(len(g)) +func (g AdjacencyList) IsolatedNodes() (i bits.Bits) { + i = bits.New(len(g)) + i.SetAll() for fr, to := range g { if len(to) > 0 { - i.SetBit(NI(fr), 0) + i.SetBit(fr, 0) for _, to := range to { - i.SetBit(to, 0) + i.SetBit(int(to), 0) } } } return } -/* -MaxmimalClique finds a maximal clique containing the node n. - -Not sure this is good for anything. It produces a single maximal clique -but there can be multiple maximal cliques containing a given node. -This algorithm just returns one of them, not even necessarily the -largest one. +// Order is the number of nodes in receiver g. +// +// It is simply a wrapper method for the Go builtin len(). +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) Order() int { + // Why a wrapper for len()? Mostly for Directed and Undirected. + // u.Order() is a little nicer than len(u.LabeledAdjacencyList). + return len(g) +} -func (g LabeledAdjacencyList) MaximalClique(n int) []int { - c := []int{n} - var m bitset.BitSet - m.Set(uint(n)) - for fr, to := range g { - if fr == n { - continue +// ParallelArcs identifies all arcs from node `fr` to node `to`. +// +// The returned slice contains an element for each arc from node `fr` to node `to`. +// The element value is the index within the slice of arcs from node `fr`. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also the method HasArc, which stops after finding a single arc. +func (g AdjacencyList) ParallelArcs(fr, to NI) (p []int) { + for x, h := range g[fr] { + if h == to { + p = append(p, x) } - if len(to) < len(c) { - continue + } + return +} + +// Permute permutes the node labeling of receiver g. +// +// Argument p must be a permutation of the node numbers of the graph, +// 0 through len(g)-1. A permutation returned by rand.Perm(len(g)) for +// example is acceptable. +// +// The graph is permuted in place. The graph keeps the same underlying +// memory but values of the graph representation are permuted to produce +// an isomorphic graph. The node previously labeled 0 becomes p[0] and so on. 
+// See example (or the code) for clarification. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) Permute(p []int) { + old := append(AdjacencyList{}, g...) // shallow copy + for fr, arcs := range old { + for i, to := range arcs { + arcs[i] = NI(p[to]) } - f := 0 - for _, to := range to { - if m.Test(uint(to.To)) { - f++ - if f == len(c) { - c = append(c, to.To) - m.Set(uint(to.To)) - break - } - } + g[p[fr]] = arcs + } +} + +// ShuffleArcLists shuffles the arc lists of each node of receiver g. +// +// For example a node with arcs leading to nodes 3 and 7 might have an +// arc list of either [3 7] or [7 3] after calling this method. The +// connectivity of the graph is not changed. The resulting graph stays +// equivalent but a traversal will encounter arcs in a different +// order. +// +// If Rand r is nil, the rand package default shared source is used. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) ShuffleArcLists(r *rand.Rand) { + ri := rand.Intn + if r != nil { + ri = r.Intn + } + // Knuth-Fisher-Yates + for _, to := range g { + for i := len(to); i > 1; { + j := ri(i) + i-- + to[i], to[j] = to[j], to[i] } } - return c } -*/ diff --git a/vendor/github.com/soniakeys/graph/adj_cg.go b/vendor/github.com/soniakeys/graph/adj_cg.go index a484ee04..b118fe4d 100644 --- a/vendor/github.com/soniakeys/graph/adj_cg.go +++ b/vendor/github.com/soniakeys/graph/adj_cg.go @@ -8,10 +8,22 @@ package graph // DO NOT EDIT adj_RO.go. The RO is for Read Only. import ( + "errors" + "fmt" "math/rand" - "time" + + "github.com/soniakeys/bits" ) +// ArcDensity returns density for an simple directed graph. +// +// See also ArcDensity function. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) ArcDensity() float64 { + return ArcDensity(len(g), g.ArcSize()) +} + // ArcSize returns the number of arcs in g. // // Note that for an undirected graph without loops, the number of undirected @@ -50,98 +62,34 @@ func (g LabeledAdjacencyList) BoundsOk() (ok bool, fr NI, to Half) { return true, -1, to } -// BreadthFirst traverses a directed or undirected graph in breadth first order. +// BreadthFirst traverses a directed or undirected graph in breadth +// first order. // -// Argument start is the start node for the traversal. If r is nil, nodes are -// visited in deterministic order. If a random number generator is supplied, -// nodes at each level are visited in random order. -// -// Argument f can be nil if you have no interest in the FromList path result. -// If FromList f is non-nil, the method populates f.Paths and sets f.MaxLen. -// It does not set f.Leaves. For convenience argument f can be a zero value -// FromList. If f.Paths is nil, the FromList is initialized first. If f.Paths -// is non-nil however, the FromList is used as is. The method uses a value of -// PathEnd.Len == 0 to indentify unvisited nodes. Existing non-zero values -// will limit the traversal. -// -// Traversal calls the visitor function v for each node starting with node -// start. If v returns true, traversal continues. If v returns false, the -// traversal terminates immediately. PathEnd Len and From values are updated -// before calling the visitor function. -// -// On return f.Paths and f.MaxLen are set but not f.Leaves. 
-// -// Returned is the number of nodes visited and ok = true if the traversal -// ran to completion or ok = false if it was terminated by the visitor -// function returning false. +// Traversal starts at node start and visits the nodes reachable from +// start. The function visit is called for each node visited. Nodes +// not reachable from start are not visited. // // There are equivalent labeled and unlabeled versions of this method. -func (g LabeledAdjacencyList) BreadthFirst(start NI, r *rand.Rand, f *FromList, v OkNodeVisitor) (visited int, ok bool) { - switch { - case f == nil: - e := NewFromList(len(g)) - f = &e - case f.Paths == nil: - *f = NewFromList(len(g)) - } - rp := f.Paths - // the frontier consists of nodes all at the same level - frontier := []NI{start} - level := 1 - // assign path when node is put on frontier, - rp[start] = PathEnd{Len: level, From: -1} - for { - f.MaxLen = level - level++ - var next []NI - if r == nil { - for _, n := range frontier { - visited++ - if !v(n) { // visit nodes as they come off frontier - return - } - for _, nb := range g[n] { - if rp[nb.To].Len == 0 { - next = append(next, nb.To) - rp[nb.To] = PathEnd{From: n, Len: level} - } - } - } - } else { // take nodes off frontier at random - for _, i := range r.Perm(len(frontier)) { - n := frontier[i] - // remainder of block same as above - visited++ - if !v(n) { - return - } - for _, nb := range g[n] { - if rp[nb.To].Len == 0 { - next = append(next, nb.To) - rp[nb.To] = PathEnd{From: n, Len: level} - } +// +// See also alt.BreadthFirst, a variant with more options, and +// alt.BreadthFirst2, a direction optimizing variant. +func (g LabeledAdjacencyList) BreadthFirst(start NI, visit func(NI)) { + v := bits.New(len(g)) + v.SetBit(int(start), 1) + visit(start) + var next []NI + for frontier := []NI{start}; len(frontier) > 0; { + for _, n := range frontier { + for _, nb := range g[n] { + if v.Bit(int(nb.To)) == 0 { + v.SetBit(int(nb.To), 1) + visit(nb.To) + next = append(next, nb.To) } } } - if len(next) == 0 { - break - } - frontier = next + frontier, next = next, frontier[:0] } - return visited, true -} - -// BreadthFirstPath finds a single path from start to end with a minimum -// number of nodes. -// -// Returned is the path as list of nodes. -// The result is nil if no path was found. -// -// There are equivalent labeled and unlabeled versions of this method. -func (g LabeledAdjacencyList) BreadthFirstPath(start, end NI) []NI { - var f FromList - g.BreadthFirst(start, nil, &f, func(n NI) bool { return n != end }) - return f.PathTo(end, nil) } // Copy makes a deep copy of g. @@ -157,99 +105,68 @@ func (g LabeledAdjacencyList) Copy() (c LabeledAdjacencyList, ma int) { return } -// DepthFirst traverses a graph depth first. -// -// As it traverses it calls visitor function v for each node. If v returns -// false at any point, the traversal is terminated immediately and DepthFirst -// returns false. Otherwise DepthFirst returns true. -// -// DepthFirst uses argument bm is used as a bitmap to guide the traversal. -// For a complete traversal, bm should be 0 initially. During the -// traversal, bits are set corresponding to each node visited. -// The bit is set before calling the visitor function. +// DepthFirst traverses a directed or undirected graph in depth +// first order. // -// Argument bm can be nil if you have no need for it. -// In this case a bitmap is created internally for one-time use. -// -// Alternatively v can be nil. 
In this case traversal still proceeds and -// updates the bitmap, which can be a useful result. -// DepthFirst always returns true in this case. -// -// It makes no sense for both bm and v to be nil. In this case DepthFirst -// returns false immediately. +// Traversal starts at node start and visits the nodes reachable from +// start. The function visit is called for each node visited. Nodes +// not reachable from start are not visited. // // There are equivalent labeled and unlabeled versions of this method. -func (g LabeledAdjacencyList) DepthFirst(start NI, bm *Bits, v OkNodeVisitor) (ok bool) { - if bm == nil { - if v == nil { - return false - } - bm = &Bits{} - } - var df func(n NI) bool - df = func(n NI) bool { - if bm.Bit(n) == 1 { - return true - } - bm.SetBit(n, 1) - if v != nil && !v(n) { - return false - } - for _, nb := range g[n] { - if !df(nb.To) { - return false +// +// See also alt.DepthFirst, a variant with more options. +func (g LabeledAdjacencyList) DepthFirst(start NI, visit func(NI)) { + v := bits.New(len(g)) + var f func(NI) + f = func(n NI) { + visit(n) + v.SetBit(int(n), 1) + for _, to := range g[n] { + if v.Bit(int(to.To)) == 0 { + f(to.To) } } - return true } - return df(start) + f(start) } -// DepthFirstRandom traverses a graph depth first, but following arcs in -// random order among arcs from a single node. -// -// If Rand r is nil, the method creates a new source and generator for -// one-time use. +// Equal compares two graphs for equality. // -// Usage is otherwise like the DepthFirst method. See DepthFirst. +// Note this is simple equality, not isomorphism. Graphs are equal if +// they have the same order and if corresponding nodes have the same +// arcs, although they do not need to be in the same order. // // There are equivalent labeled and unlabeled versions of this method. -func (g LabeledAdjacencyList) DepthFirstRandom(start NI, bm *Bits, v OkNodeVisitor, r *rand.Rand) (ok bool) { - if bm == nil { - if v == nil { - return false - } - bm = &Bits{} - } - if r == nil { - r = rand.New(rand.NewSource(time.Now().UnixNano())) +func (g LabeledAdjacencyList) Equal(h LabeledAdjacencyList) bool { + if len(g) != len(h) { + return false } - var df func(n NI) bool - df = func(n NI) bool { - if bm.Bit(n) == 1 { - return true + for n, gn := range g { + m := map[Half]int{} + for _, to := range gn { + m[to]++ } - bm.SetBit(n, 1) - if v != nil && !v(n) { - return false + for _, to := range h[n] { + m[to]-- } - to := g[n] - for _, i := range r.Perm(len(to)) { - if !df(to[i].To) { + for _, c := range m { + if c != 0 { return false } } - return true } - return df(start) + return true } -// HasArc returns true if g has any arc from node fr to node to. +// HasArc returns true if g has any arc from node `fr` to node `to`. // -// Also returned is the index within the slice of arcs from node fr. -// If no arc from fr to to is present, HasArc returns false, -1. +// Also returned is the index within the slice of arcs from node `fr`. +// If no arc from `fr` to `to` is present, HasArc returns false, -1. // // There are equivalent labeled and unlabeled versions of this method. +// +// See also the method ParallelArcs, which finds all parallel arcs from +// `fr` to `to`. 
func (g LabeledAdjacencyList) HasArc(fr, to NI) (bool, int) { for x, h := range g[fr] { if h.To == to { @@ -259,16 +176,14 @@ func (g LabeledAdjacencyList) HasArc(fr, to NI) (bool, int) { return false, -1 } -// HasLoop identifies if a graph contains a loop, an arc that leads from a +// AnyLoop identifies if a graph contains a loop, an arc that leads from a // a node back to the same node. // -// If the graph has a loop, the result is an example node that has a loop. -// // If g contains a loop, the method returns true and an example of a node // with a loop. If there are no loops in g, the method returns false, -1. // // There are equivalent labeled and unlabeled versions of this method. -func (g LabeledAdjacencyList) HasLoop() (bool, NI) { +func (g LabeledAdjacencyList) AnyLoop() (bool, NI) { for fr, to := range g { for _, to := range to { if NI(fr) == to.To { @@ -279,37 +194,138 @@ func (g LabeledAdjacencyList) HasLoop() (bool, NI) { return false, -1 } -// HasParallelMap identifies if a graph contains parallel arcs, multiple arcs -// that lead from a node to the same node. -// -// If the graph has parallel arcs, the method returns true and -// results fr and to represent an example where there are parallel arcs -// from node fr to node to. -// -// If there are no parallel arcs, the method returns false, -1 -1. +// AddNode maps a node in a supergraph to a subgraph node. // -// Multiple loops on a node count as parallel arcs. +// Argument p must be an NI in supergraph s.Super. AddNode panics if +// p is not a valid node index of s.Super. // -// "Map" in the method name indicates that a Go map is used to detect parallel -// arcs. Compared to method HasParallelSort, this gives better asymtotic -// performance for large dense graphs but may have increased overhead for -// small or sparse graphs. +// AddNode is idempotent in that it does not add a new node to the subgraph if +// a subgraph node already exists mapped to supergraph node p. // -// There are equivalent labeled and unlabeled versions of this method. -func (g LabeledAdjacencyList) HasParallelMap() (has bool, fr, to NI) { - for n, to := range g { - if len(to) == 0 { - continue +// The mapped subgraph NI is returned. +func (s *LabeledSubgraph) AddNode(p NI) (b NI) { + if int(p) < 0 || int(p) >= s.Super.Order() { + panic(fmt.Sprint("AddNode: NI ", p, " not in supergraph")) + } + if b, ok := s.SubNI[p]; ok { + return b + } + a := s.LabeledAdjacencyList + b = NI(len(a)) + s.LabeledAdjacencyList = append(a, nil) + s.SuperNI = append(s.SuperNI, p) + s.SubNI[p] = b + return +} + +// AddArc adds an arc to a subgraph. +// +// Arguments fr, to must be NIs in supergraph s.Super. As with AddNode, +// AddArc panics if fr and to are not valid node indexes of s.Super. +// +// The arc specfied by fr, to must exist in s.Super. Further, the number of +// parallel arcs in the subgraph cannot exceed the number of corresponding +// parallel arcs in the supergraph. That is, each arc already added to the +// subgraph counts against the arcs available in the supergraph. If a matching +// arc is not available, AddArc returns an error. +// +// If a matching arc is available, subgraph nodes are added as needed, the +// subgraph arc is added, and the method returns nil. +func (s *LabeledSubgraph) AddArc(fr NI, to Half) error { + // verify supergraph NIs first, but without adding subgraph nodes just yet. 
+ if int(fr) < 0 || int(fr) >= s.Super.Order() { + panic(fmt.Sprint("AddArc: NI ", fr, " not in supergraph")) + } + if int(to.To) < 0 || int(to.To) >= s.Super.Order() { + panic(fmt.Sprint("AddArc: NI ", to.To, " not in supergraph")) + } + // count existing matching arcs in subgraph + n := 0 + a := s.LabeledAdjacencyList + if bf, ok := s.SubNI[fr]; ok { + if bt, ok := s.SubNI[to.To]; ok { + // both NIs already exist in subgraph, need to count arcs + bTo := to + bTo.To = bt + for _, t := range a[bf] { + if t == bTo { + n++ + } + } } - m := map[NI]struct{}{} - for _, to := range to { - if _, ok := m[to.To]; ok { - return true, NI(n), to.To + } + // verify matching arcs are available in supergraph + for _, t := range (*s.Super)[fr] { + if t == to { + if n > 0 { + n-- // match existing arc + continue + } + // no more existing arcs need to be matched. nodes can finally + // be added as needed and then the arc can be added. + bf := s.AddNode(fr) + to.To = s.AddNode(to.To) + s.LabeledAdjacencyList[bf] = append(s.LabeledAdjacencyList[bf], to) + return nil // success + } + } + return errors.New("arc not available in supergraph") +} + +func (super LabeledAdjacencyList) induceArcs(sub map[NI]NI, sup []NI) LabeledAdjacencyList { + s := make(LabeledAdjacencyList, len(sup)) + for b, p := range sup { + var a []Half + for _, to := range super[p] { + if bt, ok := sub[to.To]; ok { + to.To = bt + a = append(a, to) } - m[to.To] = struct{}{} } + s[b] = a } - return false, -1, -1 + return s +} + +// InduceList constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. Argument l must be a list of +// NIs in receiver graph g. Receiver g becomes the supergraph of the induced +// subgraph. +// +// Duplicate NIs are allowed in list l. The duplicates are effectively removed +// and only a single corresponding node is created in the subgraph. Subgraph +// NIs are mapped in the order of list l, execpt for ignoring duplicates. +// NIs in l that are not in g will panic. +// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *LabeledAdjacencyList) InduceList(l []NI) *LabeledSubgraph { + sub, sup := mapList(l) + return &LabeledSubgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + + LabeledAdjacencyList: g.induceArcs(sub, sup)} +} + +// InduceBits constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. Argument t must be a bitmap +// representing NIs in receiver graph g. Receiver g becomes the supergraph +// of the induced subgraph. NIs in t that are not in g will panic. +// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *LabeledAdjacencyList) InduceBits(t bits.Bits) *LabeledSubgraph { + sub, sup := mapBits(t) + return &LabeledSubgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + + LabeledAdjacencyList: g.induceArcs(sub, sup)} } // IsSimple checks for loops and parallel arcs. @@ -320,14 +336,14 @@ func (g LabeledAdjacencyList) HasParallelMap() (has bool, fr, to NI) { // found, simple returns false and a node that represents a counterexample // to the graph being simple. // -// See also separate methods HasLoop and HasParallel. +// See also separate methods AnyLoop and AnyParallel. // // There are equivalent labeled and unlabeled versions of this method. 
func (g LabeledAdjacencyList) IsSimple() (ok bool, n NI) { - if lp, n := g.HasLoop(); lp { + if lp, n := g.AnyLoop(); lp { return false, n } - if pa, n, _ := g.HasParallelSort(); pa { + if pa, n, _ := g.AnyParallel(); pa { return false, n } return true, -1 @@ -338,50 +354,92 @@ func (g LabeledAdjacencyList) IsSimple() (ok bool, n NI) { // An isolated node is one with no arcs going to or from it. // // There are equivalent labeled and unlabeled versions of this method. -func (g LabeledAdjacencyList) IsolatedNodes() (i Bits) { - i.SetAll(len(g)) +func (g LabeledAdjacencyList) IsolatedNodes() (i bits.Bits) { + i = bits.New(len(g)) + i.SetAll() for fr, to := range g { if len(to) > 0 { - i.SetBit(NI(fr), 0) + i.SetBit(fr, 0) for _, to := range to { - i.SetBit(to.To, 0) + i.SetBit(int(to.To), 0) } } } return } -/* -MaxmimalClique finds a maximal clique containing the node n. - -Not sure this is good for anything. It produces a single maximal clique -but there can be multiple maximal cliques containing a given node. -This algorithm just returns one of them, not even necessarily the -largest one. +// Order is the number of nodes in receiver g. +// +// It is simply a wrapper method for the Go builtin len(). +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) Order() int { + // Why a wrapper for len()? Mostly for Directed and Undirected. + // u.Order() is a little nicer than len(u.LabeledAdjacencyList). + return len(g) +} -func (g LabeledAdjacencyList) MaximalClique(n int) []int { - c := []int{n} - var m bitset.BitSet - m.Set(uint(n)) - for fr, to := range g { - if fr == n { - continue +// ParallelArcs identifies all arcs from node `fr` to node `to`. +// +// The returned slice contains an element for each arc from node `fr` to node `to`. +// The element value is the index within the slice of arcs from node `fr`. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also the method HasArc, which stops after finding a single arc. +func (g LabeledAdjacencyList) ParallelArcs(fr, to NI) (p []int) { + for x, h := range g[fr] { + if h.To == to { + p = append(p, x) } - if len(to) < len(c) { - continue + } + return +} + +// Permute permutes the node labeling of receiver g. +// +// Argument p must be a permutation of the node numbers of the graph, +// 0 through len(g)-1. A permutation returned by rand.Perm(len(g)) for +// example is acceptable. +// +// The graph is permuted in place. The graph keeps the same underlying +// memory but values of the graph representation are permuted to produce +// an isomorphic graph. The node previously labeled 0 becomes p[0] and so on. +// See example (or the code) for clarification. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) Permute(p []int) { + old := append(LabeledAdjacencyList{}, g...) // shallow copy + for fr, arcs := range old { + for i, to := range arcs { + arcs[i].To = NI(p[to.To]) } - f := 0 - for _, to := range to { - if m.Test(uint(to.To)) { - f++ - if f == len(c) { - c = append(c, to.To) - m.Set(uint(to.To)) - break - } - } + g[p[fr]] = arcs + } +} + +// ShuffleArcLists shuffles the arc lists of each node of receiver g. +// +// For example a node with arcs leading to nodes 3 and 7 might have an +// arc list of either [3 7] or [7 3] after calling this method. The +// connectivity of the graph is not changed. The resulting graph stays +// equivalent but a traversal will encounter arcs in a different +// order. 
+// +// If Rand r is nil, the rand package default shared source is used. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) ShuffleArcLists(r *rand.Rand) { + ri := rand.Intn + if r != nil { + ri = r.Intn + } + // Knuth-Fisher-Yates + for _, to := range g { + for i := len(to); i > 1; { + j := ri(i) + i-- + to[i], to[j] = to[j], to[i] } } - return c } -*/ diff --git a/vendor/github.com/soniakeys/graph/bits.go b/vendor/github.com/soniakeys/graph/bits.go deleted file mode 100644 index b86703ca..00000000 --- a/vendor/github.com/soniakeys/graph/bits.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2014 Sonia Keys -// License MIT: http://opensource.org/licenses/MIT - -package graph - -import ( - "fmt" - "math/big" -) - -// Bits is bitmap, or bitset, intended to store a single bit of information -// per node of a graph. -// -// The current implementation is backed by a big.Int and so is a reference -// type in the same way a big.Int is. -type Bits struct { - i big.Int -} - -// NewBits constructs a Bits value with the bits ns set to 1. -func NewBits(ns ...NI) (b Bits) { - for _, n := range ns { - b.SetBit(n, 1) - } - return -} - -// AllNot sets n bits of z to the complement of x. -// -// It is a convenience method for SetAll followed by AndNot. -func (z *Bits) AllNot(n int, x Bits) { - var y Bits - y.SetAll(n) - z.AndNot(y, x) -} - -// And sets z = x & y. -func (z *Bits) And(x, y Bits) { - z.i.And(&x.i, &y.i) -} - -// AndNot sets z = x &^ y. -func (z *Bits) AndNot(x, y Bits) { - z.i.AndNot(&x.i, &y.i) -} - -// Bit returns the value of the n'th bit of x. -func (b Bits) Bit(n NI) uint { - return b.i.Bit(int(n)) -} - -// Clear sets all bits to 0. -func (z *Bits) Clear() { - *z = Bits{} -} - -// Format satisfies fmt.Formatter for fmt.Printf and related methods. -// -// graph.Bits format exactly like big.Ints. -func (b Bits) Format(s fmt.State, ch rune) { - b.i.Format(s, ch) -} - -// From returns the position of the first 1 bit at or after (from) position n. -// -// It returns -1 if there is no one bit at or after position n. -// -// This provides one way to iterate over one bits. -// To iterate over the one bits, call with n = 0 to get the the first -// one bit, then call with the result + 1 to get successive one bits. -// Unlike the Iterate method, this technique is stateless and so allows -// bits to be changed between successive calls. -// -// See also Iterate. -// -// (From is just a short word that means "at or after" here; -// it has nothing to do with arc direction.) -func (b Bits) From(n NI) NI { - words := b.i.Bits() - i := int(n) - x := i >> wordExp // x now index of word containing bit i. - if x >= len(words) { - return -1 - } - // test for 1 in this word at or after n - if wx := words[x] >> (uint(i) & (wordSize - 1)); wx != 0 { - return n + NI(trailingZeros(wx)) - } - x++ - for y, wy := range words[x:] { - if wy != 0 { - return NI((x+y)<>= uint(t + 1) - if w == 0 { - break - } - t = trailingZeros(w) - i += 1 + t - } - } - } - return true -} - -// Or sets z = x | y. -func (z *Bits) Or(x, y Bits) { - z.i.Or(&x.i, &y.i) -} - -// PopCount returns the number of 1 bits. -func (b Bits) PopCount() (c int) { - // algorithm selected to be efficient for sparse bit sets. - for _, w := range b.i.Bits() { - for w != 0 { - w &= w - 1 - c++ - } - } - return -} - -// Set sets the bits of z to the bits of x. -func (z *Bits) Set(x Bits) { - z.i.Set(&x.i) -} - -var one = big.NewInt(1) - -// SetAll sets z to have n 1 bits. 
-// -// It's useful for initializing z to have a 1 for each node of a graph. -func (z *Bits) SetAll(n int) { - z.i.Sub(z.i.Lsh(one, uint(n)), one) -} - -// SetBit sets the n'th bit to b, where be is a 0 or 1. -func (z *Bits) SetBit(n NI, b uint) { - z.i.SetBit(&z.i, int(n), b) -} - -// Single returns true if b has exactly one 1 bit. -func (b Bits) Single() bool { - // like PopCount, but stop as soon as two are found - c := 0 - for _, w := range b.i.Bits() { - for w != 0 { - w &= w - 1 - c++ - if c == 2 { - return false - } - } - } - return c == 1 -} - -// Slice returns a slice with the positions of each 1 bit. -func (b Bits) Slice() (s []NI) { - // (alternative implementation might use Popcount and make to get the - // exact cap slice up front. unclear if that would be better.) - b.Iterate(func(n NI) bool { - s = append(s, n) - return true - }) - return -} - -// Xor sets z = x ^ y. -func (z *Bits) Xor(x, y Bits) { - z.i.Xor(&x.i, &y.i) -} - -// Zero returns true if there are no 1 bits. -func (b Bits) Zero() bool { - return len(b.i.Bits()) == 0 -} - -// trailingZeros returns the number of trailing 0 bits in v. -// -// If v is 0, it returns 0. -func trailingZeros(v big.Word) int { - return deBruijnBits[v&-v*deBruijnMultiple>>deBruijnShift] -} diff --git a/vendor/github.com/soniakeys/graph/bits32.go b/vendor/github.com/soniakeys/graph/bits32.go deleted file mode 100644 index 18e07f9a..00000000 --- a/vendor/github.com/soniakeys/graph/bits32.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2014 Sonia Keys -// License MIT: http://opensource.org/licenses/MIT - -// +build 386 arm - -package graph - -// "word" here is math/big.Word -const ( - wordSize = 32 - wordExp = 5 // 2^5 = 32 -) - -// deBruijn magic numbers used in trailingZeros() -// -// reference: http://graphics.stanford.edu/~seander/bithacks.html -const deBruijnMultiple = 0x077CB531 -const deBruijnShift = 27 - -var deBruijnBits = []int{ - 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, - 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9, -} diff --git a/vendor/github.com/soniakeys/graph/bits64.go b/vendor/github.com/soniakeys/graph/bits64.go deleted file mode 100644 index ab601dd6..00000000 --- a/vendor/github.com/soniakeys/graph/bits64.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2014 Sonia Keys -// License MIT: http://opensource.org/licenses/MIT - -// +build !386,!arm - -package graph - -const ( - wordSize = 64 - wordExp = 6 // 2^6 = 64 -) - -// reference: http://graphics.stanford.edu/~seander/bithacks.html -const deBruijnMultiple = 0x03f79d71b4ca8b09 -const deBruijnShift = 58 - -var deBruijnBits = []int{ - 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, - 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, - 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, - 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, -} diff --git a/vendor/github.com/soniakeys/graph/dir.go b/vendor/github.com/soniakeys/graph/dir.go index 508306d1..a9ce5357 100644 --- a/vendor/github.com/soniakeys/graph/dir.go +++ b/vendor/github.com/soniakeys/graph/dir.go @@ -3,12 +3,125 @@ package graph +import ( + "math" +) + // dir.go has methods specific to directed graphs, types Directed and -// LabeledDirected. +// LabeledDirected, also Dominators. // // Methods on Directed are first, with exported methods alphabetized. +// Dominators type and methods are at the end. +//---------------------------- -import "errors" +// Cycles emits all elementary cycles in a directed graph. 
+// +// The algorithm here is Johnson's. See also the equivalent but generally +// slower alt.TarjanCycles. +func (g Directed) Cycles(emit func([]NI) bool) { + // Johnsons "Finding all the elementary circuits of a directed graph", + // SIAM J. Comput. Vol. 4, No. 1, March 1975. + a := g.AdjacencyList + k := make(AdjacencyList, len(a)) + B := make([]map[NI]bool, len(a)) + blocked := make([]bool, len(a)) + for i := range a { + blocked[i] = true + B[i] = map[NI]bool{} + } + var s NI + var stack []NI + var unblock func(NI) + unblock = func(u NI) { + blocked[u] = false + for w := range B[u] { + delete(B[u], w) + if blocked[w] { + unblock(w) + } + } + } + var circuit func(NI) (bool, bool) + circuit = func(v NI) (found, ok bool) { + f := false + stack = append(stack, v) + blocked[v] = true + for _, w := range k[v] { + if w == s { + if !emit(stack) { + return + } + f = true + } else if !blocked[w] { + switch found, ok = circuit(w); { + case !ok: + return + case found: + f = true + } + } + } + if f { + unblock(v) + } else { + for _, w := range k[v] { + B[w][v] = true + } + } + stack = stack[:len(stack)-1] + return f, true + } + for s = 0; int(s) < len(a); s++ { + // (so there's a little extra n^2 component introduced here that + // comes from not making a proper subgraph but just removing arcs + // and leaving isolated nodes. Iterating over the isolated nodes + // should be very fast though. It seems like it would be a net win + // over creating a subgraph.) + // shallow subgraph + for z := NI(0); z < s; z++ { + k[z] = nil + } + for z := int(s); z < len(a); z++ { + k[z] = a[z] + } + // find scc in k with s + var scc []NI + Directed{k}.StronglyConnectedComponents(func(c []NI) bool { + for _, n := range c { + if n == s { // this is it + scc = c + return false // stop scc search + } + } + return true // keep looking + }) + // clear k + for n := range k { + k[n] = nil + } + // map component + for _, n := range scc { + blocked[n] = false + } + // copy component to k + for _, fr := range scc { + var kt []NI + for _, to := range a[fr] { + if !blocked[to] { + kt = append(kt, to) + } + } + k[fr] = kt + } + if _, ok := circuit(s); !ok { + return + } + // reblock component + for _, n := range scc { + blocked[n] = true + } + } +} // DAGMaxLenPath finds a maximum length path in a directed acyclic graph. // @@ -20,7 +133,7 @@ func (g Directed) DAGMaxLenPath(ordering []NI) (path []NI) { // // Similar code in label.go var n NI - mlp := make([][]NI, len(g.AdjacencyList)) // index by node number + mlp := make([][]NI, g.Order()) // index by node number for i := len(ordering) - 1; i >= 0; i-- { fr := ordering[i] // node number to := g.AdjacencyList[fr] @@ -43,263 +156,99 @@ func (g Directed) DAGMaxLenPath(ordering []NI) (path []NI) { return append([]NI{n}, path...) } -// EulerianCycle finds an Eulerian cycle in a directed multigraph. -// -// * If g has no nodes, result is nil, nil. -// -// * If g is Eulerian, result is an Eulerian cycle with err = nil. -// The cycle result is a list of nodes, where the first and last -// nodes are the same. -// -// * Otherwise, result is nil, error -// -// Internally, EulerianCycle copies the entire graph g. -// See EulerianCycleD for a more space efficient version. -func (g Directed) EulerianCycle() ([]NI, error) { - c, m := g.Copy() - return c.EulerianCycleD(m) -} - -// EulerianCycleD finds an Eulerian cycle in a directed multigraph. -// -// EulerianCycleD is destructive on its receiver g. See EulerianCycle for -// a non-destructive version. 
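// Editorial sketch, not part of the vendored patch: a minimal, hypothetical
// driver for the Directed.Cycles method (Johnson's algorithm) added above.
// The tiny graph literal and the noted output are illustrative assumptions.
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	g := graph.Directed{AdjacencyList: graph.AdjacencyList{
		0: {1},
		1: {2},
		2: {0, 1},
	}}
	g.Cycles(func(c []graph.NI) bool {
		fmt.Println(c) // each elementary cycle, here [0 1 2] and [1 2]
		return true    // returning false stops the enumeration early
	})
}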
-// -// Argument ma must be the correct arc size, or number of arcs in g. +// FromList creates a spanning forest of a graph. // -// * If g has no nodes, result is nil, nil. +// The method populates the From members in f.Paths and returns the FromList. +// Also returned is a bool, true if the receiver is found to be a simple graph +// representing a tree or forest. Loops, or any case of multiple arcs going to +// a node will cause simpleForest to be false. // -// * If g is Eulerian, result is an Eulerian cycle with err = nil. -// The cycle result is a list of nodes, where the first and last -// nodes are the same. +// The FromList return value f will always be a spanning forest of the entire +// graph. The bool return value simpleForest tells if the receiver graph g +// was a simple forest to begin with. // -// * Otherwise, result is nil, error -func (g Directed) EulerianCycleD(ma int) ([]NI, error) { - if len(g.AdjacencyList) == 0 { - return nil, nil - } - e := newEulerian(g.AdjacencyList, ma) - for e.s >= 0 { - v := e.top() // v is node that starts cycle - e.push() - // if Eulerian, we'll always come back to starting node - if e.top() != v { - return nil, errors.New("not balanced") - } - e.keep() - } - if !e.uv.Zero() { - return nil, errors.New("not strongly connected") +// Other members of the FromList are left as zero values. +// Use FromList.RecalcLen and FromList.RecalcLeaves as needed. +func (g Directed) FromList() (f *FromList, simpleForest bool) { + paths := make([]PathEnd, g.Order()) + for i := range paths { + paths[i].From = -1 } - return e.p, nil -} - -// EulerianPath finds an Eulerian path in a directed multigraph. -// -// * If g has no nodes, result is nil, nil. -// -// * If g has an Eulerian path, result is an Eulerian path with err = nil. -// The path result is a list of nodes, where the first node is start. -// -// * Otherwise, result is nil, error -// -// Internally, EulerianPath copies the entire graph g. -// See EulerianPathD for a more space efficient version. -func (g Directed) EulerianPath() ([]NI, error) { - ind := g.InDegree() - var start NI - for n, to := range g.AdjacencyList { - if len(to) > ind[n] { - start = NI(n) - break + simpleForest = true + for fr, to := range g.AdjacencyList { + for _, to := range to { + if int(to) == fr || paths[to].From >= 0 { + simpleForest = false + } else { + paths[to].From = NI(fr) + } } } - c, m := g.Copy() - return c.EulerianPathD(m, start) + return &FromList{Paths: paths}, simpleForest } -// EulerianPathD finds an Eulerian path in a directed multigraph. +// SpanTree builds a tree spanning nodes reachable from the given root. // -// EulerianPathD is destructive on its receiver g. See EulerianPath for -// a non-destructive version. +// The component is spanned by breadth-first search from root. +// The resulting spanning tree in stored a FromList. // -// Argument ma must be the correct arc size, or number of arcs in g. -// Argument start must be a valid start node for the path. +// If FromList.Paths is not the same length as g, it is allocated and +// initialized. This allows a zero value FromList to be passed as f. +// If FromList.Paths is the same length as g, it is used as is and is not +// reinitialized. This allows multiple trees to be spanned in the same +// FromList with successive calls. // -// * If g has no nodes, result is nil, nil. +// For nodes spanned, the Path member of the returned FromList is populated +// with both From and Len values. The MaxLen member will be updated but +// not Leaves. 
// -// * If g has an Eulerian path, result is an Eulerian path with err = nil. -// The path result is a list of nodes, where the first node is start. -// -// * Otherwise, result is nil, error -func (g Directed) EulerianPathD(ma int, start NI) ([]NI, error) { - if len(g.AdjacencyList) == 0 { - return nil, nil - } - e := newEulerian(g.AdjacencyList, ma) - e.p[0] = start - // unlike EulerianCycle, the first path doesn't have be a cycle. - e.push() - e.keep() - for e.s >= 0 { - start = e.top() - e.push() - // paths after the first must be cycles though - // (as long as there are nodes on the stack) - if e.top() != start { - return nil, errors.New("no Eulerian path") +// Returned is the number of nodes spanned, which will be the number of nodes +// reachable from root, and a bool indicating if these nodes were found to be +// a simply connected tree in the receiver graph g. Any cycles, loops, +// or parallel arcs in the component will cause simpleTree to be false, but +// FromList f will still be populated with a valid spanning tree. +func (g Directed) SpanTree(root NI, f *FromList) (nSpanned int, simpleTree bool) { + a := g.AdjacencyList + p := f.Paths + if len(p) != len(a) { + p = make([]PathEnd, len(a)) + for i := range p { + p[i].From = -1 } - e.keep() + f.Paths = p } - if !e.uv.Zero() { - return nil, errors.New("no Eulerian path") - } - return e.p, nil -} - -// starting at the node on the top of the stack, follow arcs until stuck. -// mark nodes visited, push nodes on stack, remove arcs from g. -func (e *eulerian) push() { - for u := e.top(); ; { - e.uv.SetBit(u, 0) // reset unvisited bit - arcs := e.g[u] - if len(arcs) == 0 { - return // stuck - } - w := arcs[0] // follow first arc - e.s++ // push followed node on stack - e.p[e.s] = w - e.g[u] = arcs[1:] // consume arc - u = w - } -} - -// like push, but for for undirected graphs. -func (e *eulerian) pushUndir() { - for u := e.top(); ; { - e.uv.SetBit(u, 0) - arcs := e.g[u] - if len(arcs) == 0 { - return - } - w := arcs[0] - e.s++ - e.p[e.s] = w - e.g[u] = arcs[1:] // consume arc - // here is the only difference, consume reciprocal arc as well: - a2 := e.g[w] - for x, rx := range a2 { - if rx == u { // here it is - last := len(a2) - 1 - a2[x] = a2[last] // someone else gets the seat - e.g[w] = a2[:last] // and it's gone. - break - } - } - u = w + simpleTree = true + p[root].Len = 1 + type arc struct { + from, to NI } -} - -// starting with the node on top of the stack, move nodes with no arcs. -func (e *eulerian) keep() { - for e.s >= 0 { - n := e.top() - if len(e.g[n]) > 0 { - break - } - e.p[e.m] = n - e.s-- - e.m-- - } -} - -type eulerian struct { - g AdjacencyList // working copy of graph, it gets consumed - m int // number of arcs in g, updated as g is consumed - uv Bits // unvisited - // low end of p is stack of unfinished nodes - // high end is finished path - p []NI // stack + path - s int // stack pointer -} - -func (e *eulerian) top() NI { - return e.p[e.s] -} - -func newEulerian(g AdjacencyList, m int) *eulerian { - e := &eulerian{ - g: g, - m: m, - p: make([]NI, m+1), - } - e.uv.SetAll(len(g)) - return e -} - -// MaximalNonBranchingPaths finds all paths in a directed graph that are -// "maximal" and "non-branching". -// -// A non-branching path is one where path nodes other than the first and last -// have exactly one arc leading to the node and one arc leading from the node, -// thus there is no possibility to branch away to a different path. 
-// -// A maximal non-branching path cannot be extended to a longer non-branching -// path by including another node at either end. -// -// In the case of a cyclic non-branching path, the first and last elements -// of the path will be the same node, indicating an isolated cycle. -// -// The method calls the emit argument for each path or isolated cycle in g, -// as long as emit returns true. If emit returns false, -// MaximalNonBranchingPaths returns immediately. -func (g Directed) MaximalNonBranchingPaths(emit func([]NI) bool) { - ind := g.InDegree() - var uv Bits - uv.SetAll(len(g.AdjacencyList)) - for v, vTo := range g.AdjacencyList { - if !(ind[v] == 1 && len(vTo) == 1) { - for _, w := range vTo { - n := []NI{NI(v), w} - uv.SetBit(NI(v), 0) - uv.SetBit(w, 0) - wTo := g.AdjacencyList[w] - for ind[w] == 1 && len(wTo) == 1 { - u := wTo[0] - n = append(n, u) - uv.SetBit(u, 0) - w = u - wTo = g.AdjacencyList[w] + var next []arc + frontier := []arc{{-1, root}} + for len(frontier) > 0 { + for _, fa := range frontier { // fa frontier arc + nSpanned++ + l := p[fa.to].Len + 1 + for _, to := range a[fa.to] { + if p[to].Len > 0 { + simpleTree = false + continue } - if !emit(n) { // n is a path - return + p[to] = PathEnd{From: fa.to, Len: l} + if l > f.MaxLen { + f.MaxLen = l } + next = append(next, arc{fa.to, to}) } } + frontier, next = next, frontier[:0] } - // use uv.From rather than uv.Iterate. - // Iterate doesn't work here because we're modifying uv - for b := uv.From(0); b >= 0; b = uv.From(b + 1) { - v := NI(b) - n := []NI{v} - for w := v; ; { - w = g.AdjacencyList[w][0] - uv.SetBit(w, 0) - n = append(n, w) - if w == v { - break - } - } - if !emit(n) { // n is an isolated cycle - return - } - } + return } // Undirected returns copy of g augmented as needed to make it undirected. func (g Directed) Undirected() Undirected { - c, _ := g.AdjacencyList.Copy() // start with a copy - rw := make(AdjacencyList, len(g.AdjacencyList)) // "reciprocals wanted" + c, _ := g.AdjacencyList.Copy() // start with a copy + rw := make(AdjacencyList, g.Order()) // "reciprocals wanted" for fr, to := range g.AdjacencyList { arc: // for each arc in g for _, to := range to { @@ -327,64 +276,13 @@ func (g Directed) Undirected() Undirected { return Undirected{c} } -// StronglyConnectedComponents identifies strongly connected components -// in a directed graph. -// -// Algorithm by David J. Pearce, from "An Improved Algorithm for Finding the -// Strongly Connected Components of a Directed Graph". It is algorithm 3, -// PEA_FIND_SCC2 in -// http://homepages.mcs.vuw.ac.nz/~djp/files/P05.pdf, accessed 22 Feb 2015. -// -// Returned is a list of components, each component is a list of nodes. -/* -func (g Directed) StronglyConnectedComponents() []int { - rindex := make([]int, len(g)) - S := []int{} - index := 1 - c := len(g) - 1 - visit := func(v int) { - root := true - rindex[v] = index - index++ - for _, w := range g[v] { - if rindex[w] == 0 { - visit(w) - } - if rindex[w] < rindex[v] { - rindex[v] = rindex[w] - root = false - } - } - if root { - index-- - for top := len(S) - 1; top >= 0 && rindex[v] <= rindex[top]; top-- { - w = rindex[top] - S = S[:top] - rindex[w] = c - index-- - } - rindex[v] = c - c-- - } else { - S = append(S, v) - } - } - for v := range g { - if rindex[v] == 0 { - visit(v) - } - } - return rindex -} -*/ - // Transpose constructs a new adjacency list with all arcs reversed. // // For every arc from->to of g, the result will have an arc to->from. 
// Transpose also counts arcs as it traverses and returns ma the number of arcs // in g (equal to the number of arcs in the result.) func (g Directed) Transpose() (t Directed, ma int) { - ta := make(AdjacencyList, len(g.AdjacencyList)) + ta := make(AdjacencyList, g.Order()) for n, nbs := range g.AdjacencyList { for _, nb := range nbs { ta[nb] = append(ta[nb], NI(n)) @@ -394,6 +292,102 @@ func (g Directed) Transpose() (t Directed, ma int) { return Directed{ta}, ma } +// Cycles emits all elementary cycles in a directed graph. +// +// The algorithm here is Johnson's. See also the equivalent but generally +// slower alt.TarjanCycles. +func (g LabeledDirected) Cycles(emit func([]Half) bool) { + a := g.LabeledAdjacencyList + k := make(LabeledAdjacencyList, len(a)) + B := make([]map[NI]bool, len(a)) + blocked := make([]bool, len(a)) + for i := range a { + blocked[i] = true + B[i] = map[NI]bool{} + } + var s NI + var stack []Half + var unblock func(NI) + unblock = func(u NI) { + blocked[u] = false + for w := range B[u] { + delete(B[u], w) + if blocked[w] { + unblock(w) + } + } + } + var circuit func(NI) (bool, bool) + circuit = func(v NI) (found, ok bool) { + f := false + blocked[v] = true + for _, w := range k[v] { + if w.To == s { + if !emit(append(stack, w)) { + return + } + f = true + } else if !blocked[w.To] { + stack = append(stack, w) + switch found, ok = circuit(w.To); { + case !ok: + return + case found: + f = true + } + stack = stack[:len(stack)-1] + } + } + if f { + unblock(v) + } else { + for _, w := range k[v] { + B[w.To][v] = true + } + } + return f, true + } + for s = 0; int(s) < len(a); s++ { + for z := NI(0); z < s; z++ { + k[z] = nil + } + for z := int(s); z < len(a); z++ { + k[z] = a[z] + } + var scc []NI + LabeledDirected{k}.StronglyConnectedComponents(func(c []NI) bool { + for _, n := range c { + if n == s { + scc = c + return false + } + } + return true + }) + for n := range k { + k[n] = nil + } + for _, n := range scc { + blocked[n] = false + } + for _, fr := range scc { + var kt []Half + for _, to := range a[fr] { + if !blocked[to.To] { + kt = append(kt, to) + } + } + k[fr] = kt + } + if _, ok := circuit(s); !ok { + return + } + for _, n := range scc { + blocked[n] = true + } + } +} + // DAGMaxLenPath finds a maximum length path in a directed acyclic graph. // // Length here means number of nodes or arcs, not a sum of arc weights. @@ -408,7 +402,7 @@ func (g LabeledDirected) DAGMaxLenPath(ordering []NI) (n NI, path []Half) { // Visits each arc once. Time complexity O(m). // // Similar code in dir.go. - mlp := make([][]Half, len(g.LabeledAdjacencyList)) // index by node number + mlp := make([][]Half, g.Order()) // index by node number for i := len(ordering) - 1; i >= 0; i-- { fr := ordering[i] // node number to := g.LabeledAdjacencyList[fr] @@ -431,36 +425,437 @@ func (g LabeledDirected) DAGMaxLenPath(ordering []NI) (n NI, path []Half) { return } -// FromListLabels transposes a labeled graph into a FromList and associated -// list of labels. +// FromList creates a spanning forest of a graph. // -// Receiver g should be connected as a tree or forest. Specifically no node -// can have multiple incoming arcs. If any node n in g has multiple incoming -// arcs, the method returns (nil, nil, n) where n is a node with multiple -// incoming arcs. +// The method populates the From members in f.Paths and returns the FromList. 
+// Also returned is a list of labels corresponding to the from arcs, and a +// bool, true if the receiver is found to be a simple graph representing +// a tree or forest. Loops, or any case of multiple arcs going to a node +// will cause simpleForest to be false. // -// Otherwise (normally) the method populates the From members in a -// FromList.Path, populates a slice of labels, and returns the FromList, -// labels, and -1. +// The FromList return value f will always be a spanning forest of the entire +// graph. The bool return value simpleForest tells if the receiver graph g +// was a simple forest to begin with. // // Other members of the FromList are left as zero values. // Use FromList.RecalcLen and FromList.RecalcLeaves as needed. -func (g LabeledDirected) FromListLabels() (*FromList, []LI, NI) { - labels := make([]LI, len(g.LabeledAdjacencyList)) - paths := make([]PathEnd, len(g.LabeledAdjacencyList)) +func (g LabeledDirected) FromList() (f *FromList, labels []LI, simpleForest bool) { + labels = make([]LI, g.Order()) + paths := make([]PathEnd, g.Order()) for i := range paths { paths[i].From = -1 } + simpleForest = true for fr, to := range g.LabeledAdjacencyList { for _, to := range to { - if paths[to.To].From >= 0 { - return nil, nil, to.To + if int(to.To) == fr || paths[to.To].From >= 0 { + simpleForest = false + } else { + paths[to.To].From = NI(fr) + labels[to.To] = to.Label } - paths[to.To].From = NI(fr) - labels[to.To] = to.Label } } - return &FromList{Paths: paths}, labels, -1 + return &FromList{Paths: paths}, labels, simpleForest +} + +// NegativeCycles emits all cycles with negative cycle distance. +// +// The emit function is called for each cycle found. Emit must return true +// to continue cycle enumeration. If emit returns false, NegativeCycles +// stops and returns immediately. +// +// The method mutates receiver g while it runs. Access to g before +// NegativeCycles returns, such as during the emit callback, will find +// g altered. G is completely restored when NegativeCycles returns however, +// even if terminated early with a false return from emit. +// +// If mutations on g are unacceptable, use g.Copy and run NegativeCycles on +// the copy. +// +// See also: +// +// * NegativeCycle, which finds a single example of a negative cycle if +// one exists. +// +// * HasNegativeCycle, which detects if a negative cycle exists. +// +// * BellmanFord, which also detects negative cycles. +// +// * Cycles, from which negative cycles can be filtered. +// +// * alt.NegativeCycles, which uses less memory but is generally slower. +func (g LabeledDirected) NegativeCycles(w WeightFunc, emit func([]Half) bool) { + // Implementation of "Finding all the negative cycles in a directed graph" + // by Takeo Yamada and Harunobu Kinoshita, Discrete Applied Mathematics + // 118 (2002) 279–291. + newNegCyc(g, w, emit).all_nc(LabeledPath{}) +} + +type negCyc struct { + g LabeledDirected + w WeightFunc + emit func([]Half) bool + a LabeledAdjacencyList + tr AdjacencyList + d0, d1 []float64 + dc []float64 + bt [][]fromHalf + btLast []int +} + +func newNegCyc(g LabeledDirected, w WeightFunc, emit func([]Half) bool) *negCyc { + nc := &negCyc{g: g, w: w, emit: emit} + nc.a = g.LabeledAdjacencyList + // transpose to make it easier to find from-arcs. 
+ lt, _ := g.UnlabeledTranspose() + nc.tr = lt.AdjacencyList + nc.d0 = make([]float64, len(nc.a)) + nc.d1 = make([]float64, len(nc.a)) + nc.dc = make([]float64, len(nc.a)) + nc.btLast = make([]int, len(nc.a)) + nc.bt = make([][]fromHalf, len(nc.a)) + for i := range nc.bt { + nc.bt[i] = make([]fromHalf, len(nc.a)) + } + return nc +} + +func (nc *negCyc) all_nc(F LabeledPath) bool { + var C []Half + var R LabeledPath + // Step 1 + if len(F.Path) != 0 { + return nc.step2(F) + } + C = nc.g.NegativeCycle(nc.w) + if len(C) == 0 { + return true + } + // prep step 4 with no F: + F.Start = C[len(C)-1].To + R = LabeledPath{F.Start, C} + return nc.step4(C, F, R) +} + +func (nc *negCyc) step2(F LabeledPath) bool { + fEnd := F.Path[len(F.Path)-1].To + wF := F.Distance(nc.w) + dL, πL := nc.zL(F, fEnd, wF) + if !(dL < 0) { + return true + } + if len(πL) > 0 { + C := append(F.Path, πL...) + R := LabeledPath{fEnd, πL} + return nc.step4(C, F, R) + } + return nc.step3(F, fEnd, wF) +} + +func (nc *negCyc) step3(F LabeledPath, fEnd NI, wF float64) bool { + πΓ := nc.zΓ(F, wF) + if len(πΓ) > 0 { + // prep for step 4 + C := append(F.Path, πΓ...) + R := LabeledPath{fEnd, πΓ} + return nc.step4(C, F, R) + } + return nc.step5(F, fEnd) +} + +func (nc *negCyc) step4(C []Half, F, R LabeledPath) (ok bool) { + // C is a new cycle. + // F is fixed path to be extended and is a prefix of C. + // R is the remainder of C + if ok = nc.emit(C); !ok { + return + } + // for each arc in R, if not the first arc, + // extend F by the arc of the previous iteration. + // remove arc from g, + // Then make the recursive call, then put the arc back in g. + // + // after loop, replace arcs from the two stacks. + type frto struct { + fr NI + to []Half + } + var frStack [][]arc + var toStack []frto + var fr0 NI + var to0 Half + for i, h := range R.Path { + if i > 0 { + // extend F by arc {fr0 to0}, the arc of the previous iteration. + // Remove arcs to to0.To and save on stack. + // Remove arcs from arc0.fr and save on stack. + F.Path = append(F.Path, to0) + frStack = append(frStack, nc.cutTo(to0.To)) + toStack = append(toStack, frto{fr0, nc.a[fr0]}) + nc.a[fr0] = nil + } + toList := nc.a[R.Start] + for j, to := range toList { + if to == h { + last := len(toList) - 1 + toList[j], toList[last] = toList[last], toList[j] + nc.a[R.Start] = toList[:last] + ok = nc.all_nc(F) // return value + toList[last], toList[j] = toList[j], toList[last] + nc.a[R.Start] = toList + break + } + } + if !ok { + break + } + fr0 = R.Start + to0 = h + R.Start = h.To + } + for i := len(frStack) - 1; i >= 0; i-- { + nc.a[toStack[i].fr] = toStack[i].to + nc.restore(frStack[i]) + } + return +} + +func (nc *negCyc) step5(F LabeledPath, fEnd NI) (ok bool) { + // Step 5 (uncertain case) + // + // For each arc from end of F, search each case of extending + // F by that arc. + // + // before loop: save arcs from current path end, + // replace them with room for a single arc. + // extend F by room for one more arc, + ok = true + save := nc.a[fEnd] + nc.a[fEnd] = []Half{{}} + last := len(F.Path) + F.Path = append(F.Path, Half{}) + for _, h := range save { + // in each iteration, set the final arc in F, and the single + // outgoing arc, and save and clear all inbound arcs to the + // new end node. make recursive call, then restore saved + // inbound arcs for the node. + F.Path[last] = h + nc.a[fEnd][0] = h + save := nc.cutTo(h.To) + ok = nc.all_nc(F) + nc.restore(save) + if !ok { + break + } + } + // after loop, restore saved outgoing arcs in g. 
+ nc.a[fEnd] = save + return +} + +type arc struct { + n NI // node that had an arc cut from its toList + x int // index of arc that was swapped to the end of the list +} + +// modify a cutting all arcs to node n. return list of cut arcs than +// can be processed in reverse order to restore changes to a +func (nc *negCyc) cutTo(n NI) (c []arc) { + for _, fr := range nc.tr[n] { + toList := nc.a[fr] + for x := 0; x < len(toList); { + to := toList[x] + if to.To == n { + c = append(c, arc{fr, x}) + last := len(toList) - 1 + toList[x], toList[last] = toList[last], toList[x] + toList = toList[:last] + } else { + x++ + } + } + nc.a[fr] = toList + } + return +} + +func (nc *negCyc) restore(c []arc) { + for i := len(c) - 1; i >= 0; i-- { + r := c[i] + toList := nc.a[r.n] + last := len(toList) + toList = toList[:last+1] + toList[r.x], toList[last] = toList[last], toList[r.x] + nc.a[r.n] = toList + } +} + +func (nc *negCyc) zL(F LabeledPath, fEnd NI, wp float64) (float64, []Half) { + π, c, d := nc.πj(len(nc.a)-len(F.Path), F.Start, fEnd) + if c < 0 { + return d + wp, π + } + j := len(nc.a) - len(F.Path) - 1 + // G1: cut arcs going to c + saveFr := nc.cutTo(c) + for k := 1; k <= j; k++ { + nc.dc[k] = nc.dj(k, F.Start, c) + } + // G0: also cut arcs coming from c + saveTo := nc.a[c] + nc.a[c] = nil + min := nc.dj(j, F.Start, fEnd) + // G2: restore arcs going to c + nc.restore(saveFr) + for k := 1; k <= j; k++ { + d1 := nc.dc[k] + nc.dj(j+1-k, c, fEnd) + if d1 < min { + min = d1 + } + } + nc.a[c] = saveTo + return min + wp, nil +} + +func (nc *negCyc) dj(j int, v, v0 NI) float64 { + for i := range nc.d0 { + nc.d0[i] = math.Inf(1) + } + nc.d0[v0] = 0 + for ; j > 0; j-- { + for i, d := range nc.d0 { + nc.d1[i] = d + } + for vʹ, d0vʹ := range nc.d0 { + if d0vʹ < math.Inf(1) { + for _, to := range nc.a[vʹ] { + if sum := d0vʹ + nc.w(to.Label); sum < nc.d1[to.To] { + nc.d1[to.To] = sum + } + } + } + } + nc.d0, nc.d1 = nc.d1, nc.d0 + } + return nc.d0[v] +} + +func (nc *negCyc) πj(j int, v, v0 NI) ([]Half, NI, float64) { + for i := range nc.d0 { + nc.d0[i] = math.Inf(1) + nc.btLast[i] = -1 + } + nc.d0[v0] = 0 + for k := 0; k < j; k++ { + for i, d := range nc.d0 { + nc.d1[i] = d + } + btk := nc.bt[k] + for vʹ, d0vʹ := range nc.d0 { + if d0vʹ < math.Inf(1) { + for _, to := range nc.a[vʹ] { + if sum := d0vʹ + nc.w(to.Label); sum < nc.d1[to.To] { + nc.d1[to.To] = sum + btk[to.To] = fromHalf{NI(vʹ), to.Label} + nc.btLast[to.To] = k + } + } + } + } + nc.d0, nc.d1 = nc.d1, nc.d0 + } + p := make([]Half, nc.btLast[v]+1) + m := map[NI]bool{} + c := NI(-1) + to := v + for k := nc.btLast[v]; k >= 0; k-- { + fh := nc.bt[k][to] + p[k] = Half{to, fh.Label} + to = fh.From + if c < 0 { + if m[to] { + c = to + } else { + m[to] = true + } + } + } + return p, c, nc.d0[v] +} + +func (nc *negCyc) zΓ(F LabeledPath, wp float64) []Half { + p, d := nc.a.DijkstraPath(F.Path[len(F.Path)-1].To, F.Start, nc.w) + if !(wp+d < 0) { + return nil + } + return p.Path +} + +// SpanTree builds a tree spanning nodes reachable from the given root. +// +// The component is spanned by breadth-first search from root. +// The resulting spanning tree in stored a FromList, and arc labels optionally +// stored in a slice. +// +// If FromList.Paths is not the same length as g, it is allocated and +// initialized. This allows a zero value FromList to be passed as f. +// If FromList.Paths is the same length as g, it is used as is and is not +// reinitialized. This allows multiple trees to be spanned in the same +// FromList with successive calls. 
+// +// For nodes spanned, the Path member of the returned FromList is populated +// with both From and Len values. The MaxLen member will be updated but +// not Leaves. +// +// The labels slice will be populated only if it is same length as g. +// Nil can be passed for example if labels are not needed. +// +// Returned is the number of nodes spanned, which will be the number of nodes +// reachable from root, and a bool indicating if these nodes were found to be +// a simply connected tree in the receiver graph g. Any cycles, loops, +// or parallel arcs in the component will cause simpleTree to be false, but +// FromList f will still be populated with a valid spanning tree. +func (g LabeledDirected) SpanTree(root NI, f *FromList, labels []LI) (nSpanned int, simpleTree bool) { + a := g.LabeledAdjacencyList + p := f.Paths + if len(p) != len(a) { + p = make([]PathEnd, len(a)) + for i := range p { + p[i].From = -1 + } + f.Paths = p + } + simpleTree = true + p[root].Len = 1 + type arc struct { + from NI + half Half + } + var next []arc + frontier := []arc{{-1, Half{root, -1}}} + for len(frontier) > 0 { + for _, fa := range frontier { // fa frontier arc + nSpanned++ + l := p[fa.half.To].Len + 1 + for _, to := range a[fa.half.To] { + if p[to.To].Len > 0 { + simpleTree = false + continue + } + p[to.To] = PathEnd{From: fa.half.To, Len: l} + if len(labels) == len(p) { + labels[to.To] = to.Label + } + if l > f.MaxLen { + f.MaxLen = l + } + next = append(next, arc{fa.half.To, to}) + } + } + frontier, next = next, frontier[:0] + } + return } // Transpose constructs a new adjacency list that is the transpose of g. @@ -469,7 +864,7 @@ func (g LabeledDirected) FromListLabels() (*FromList, []LI, NI) { // Transpose also counts arcs as it traverses and returns ma the number of // arcs in g (equal to the number of arcs in the result.) func (g LabeledDirected) Transpose() (t LabeledDirected, ma int) { - ta := make(LabeledAdjacencyList, len(g.LabeledAdjacencyList)) + ta := make(LabeledAdjacencyList, g.Order()) for n, nbs := range g.LabeledAdjacencyList { for _, nb := range nbs { ta[nb.To] = append(ta[nb.To], Half{To: NI(n), Label: nb.Label}) @@ -484,7 +879,7 @@ func (g LabeledDirected) Transpose() (t LabeledDirected, ma int) { func (g LabeledDirected) Undirected() LabeledUndirected { c, _ := g.LabeledAdjacencyList.Copy() // start with a copy // "reciprocals wanted" - rw := make(LabeledAdjacencyList, len(g.LabeledAdjacencyList)) + rw := make(LabeledAdjacencyList, g.Order()) for fr, to := range g.LabeledAdjacencyList { arc: // for each arc in g for _, to := range to { @@ -527,7 +922,7 @@ func (g LabeledDirected) Unlabeled() Directed { // It is equivalent to g.Unlabeled().Transpose() but constructs the result // directly. func (g LabeledDirected) UnlabeledTranspose() (t Directed, ma int) { - ta := make(AdjacencyList, len(g.LabeledAdjacencyList)) + ta := make(AdjacencyList, g.Order()) for n, nbs := range g.LabeledAdjacencyList { for _, nb := range nbs { ta[nb.To] = append(ta[nb.To], NI(n)) @@ -536,3 +931,129 @@ func (g LabeledDirected) UnlabeledTranspose() (t Directed, ma int) { } return Directed{ta}, ma } + +// DominanceFrontiers holds dominance frontiers for all nodes in some graph. +// The frontier for a given node is a set of nodes, represented here as a map. +type DominanceFrontiers []map[NI]struct{} + +// Frontier computes the dominance frontier for a node set. 
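// Editorial sketch, not part of the vendored patch: one hypothetical use of
// the LabeledDirected.SpanTree method defined above, passing a zero-value
// FromList so the method allocates f.Paths itself. The graph, labels, and
// root node are illustrative assumptions.
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	g := graph.LabeledDirected{LabeledAdjacencyList: graph.LabeledAdjacencyList{
		0: {{To: 1, Label: 10}, {To: 2, Label: 20}},
		1: {{To: 3, Label: 30}},
		3: nil, // nodes 2 and 3 have no outgoing arcs
	}}
	var f graph.FromList                  // zero value is allowed
	labels := make([]graph.LI, g.Order()) // same length as g, so labels get recorded
	n, simple := g.SpanTree(0, &f, labels)
	fmt.Println(n, simple) // 4 true: all four nodes spanned, component is a simple tree
}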
+func (d DominanceFrontiers) Frontier(s map[NI]struct{}) map[NI]struct{} { + fs := map[NI]struct{}{} + for n := range s { + for f := range d[n] { + fs[f] = struct{}{} + } + } + return fs +} + +// Closure computes the closure, or iterated dominance frontier for a node set. +func (d DominanceFrontiers) Closure(s map[NI]struct{}) map[NI]struct{} { + c := map[NI]struct{}{} + e := map[NI]struct{}{} + w := map[NI]struct{}{} + var n NI + for n = range s { + e[n] = struct{}{} + w[n] = struct{}{} + } + for len(w) > 0 { + for n = range w { + break + } + delete(w, n) + for f := range d[n] { + if _, ok := c[f]; !ok { + c[f] = struct{}{} + if _, ok := e[f]; !ok { + e[f] = struct{}{} + w[f] = struct{}{} + } + } + } + } + return c +} + +// Dominators holds immediate dominators. +// +// Dominators is a return type from methods Dominators, PostDominators, and +// Doms. See those methods for construction examples. +// +// The list of immediate dominators represents the "dominator tree" +// (in the same way a FromList represents a tree, but somewhat lighter weight.) +// +// In addition to the exported immediate dominators, the type also retains +// the transpose graph that was used to compute the dominators. +// See PostDominators and Doms for a caution about modifying the transpose +// graph. +type Dominators struct { + Immediate []NI + from interface { // either Directed or LabeledDirected + domFrontiers(Dominators) DominanceFrontiers + } +} + +// Frontiers constructs the dominator frontier for each node. +// +// The frontier for a node is a set of nodes, represented as a map. The +// returned slice has the length of d.Immediate, which is the length of +// the original graph. The frontier is valid however only for nodes of the +// reachable subgraph. Nodes not in the reachable subgraph, those with a +// d.Immediate value of -1, will have a nil map. +func (d Dominators) Frontiers() DominanceFrontiers { + return d.from.domFrontiers(d) +} + +// Set constructs the dominator set for a given node. +// +// The dominator set for a node always includes the node itself as the first +// node in the returned slice, as long as the node was in the subgraph +// reachable from the start node used to construct the dominators. +// If the argument n is a node not in the subgraph, Set returns nil. +func (d Dominators) Set(n NI) []NI { + im := d.Immediate + if im[n] < 0 { + return nil + } + for s := []NI{n}; ; { + if p := im[n]; p < 0 || p == n { + return s + } else { + s = append(s, p) + n = p + } + } +} + +// starting at the node on the top of the stack, follow arcs until stuck. +// mark nodes visited, push nodes on stack, remove arcs from g. 
+func (e *eulerian) push() { + for u := e.top(); ; { + e.uv.SetBit(int(u), 0) // reset unvisited bit + arcs := e.g[u] + if len(arcs) == 0 { + return // stuck + } + w := arcs[0] // follow first arc + e.s++ // push followed node on stack + e.p[e.s] = w + e.g[u] = arcs[1:] // consume arc + u = w + } +} + +func (e *labEulerian) push() { + for u := e.top().To; ; { + e.uv.SetBit(int(u), 0) // reset unvisited bit + arcs := e.g[u] + if len(arcs) == 0 { + return // stuck + } + w := arcs[0] // follow first arc + e.s++ // push followed node on stack + e.p[e.s] = w + e.g[u] = arcs[1:] // consume arc + u = w.To + } +} diff --git a/vendor/github.com/soniakeys/graph/dir_RO.go b/vendor/github.com/soniakeys/graph/dir_RO.go index 77558a96..6d43d12a 100644 --- a/vendor/github.com/soniakeys/graph/dir_RO.go +++ b/vendor/github.com/soniakeys/graph/dir_RO.go @@ -3,6 +3,13 @@ package graph +import ( + "errors" + "fmt" + + "github.com/soniakeys/bits" +) + // dir_RO.go is code generated from dir_cg.go by directives in graph.go. // Editing dir_cg.go is okay. It is the code generation source. // DO NOT EDIT dir_RO.go. @@ -43,9 +50,10 @@ func (g Directed) Copy() (c Directed, ma int) { func (g Directed) Cyclic() (cyclic bool, fr NI, to NI) { a := g.AdjacencyList fr, to = -1, -1 - var temp, perm Bits - var df func(NI) - df = func(n NI) { + temp := bits.New(len(a)) + perm := bits.New(len(a)) + var df func(int) + df = func(n int) { switch { case temp.Bit(n) == 1: cyclic = true @@ -55,10 +63,10 @@ func (g Directed) Cyclic() (cyclic bool, fr NI, to NI) { } temp.SetBit(n, 1) for _, nb := range a[n] { - df(nb) + df(int(nb)) if cyclic { if fr < 0 { - fr, to = n, nb + fr, to = NI(n), nb } return } @@ -67,58 +75,516 @@ func (g Directed) Cyclic() (cyclic bool, fr NI, to NI) { perm.SetBit(n, 1) } for n := range a { - if perm.Bit(NI(n)) == 1 { + if perm.Bit(n) == 1 { continue } - if df(NI(n)); cyclic { // short circuit as soon as a cycle is found + if df(n); cyclic { // short circuit as soon as a cycle is found break } } return } -// FromList transposes a labeled graph into a FromList. +// DegreeCentralization returns out-degree centralization. +// +// Out-degree of a node is one measure of node centrality and is directly +// available from the adjacency list representation. This allows degree +// centralization for the graph to be very efficiently computed. +// +// The value returned is from 0 to 1 inclusive for simple directed graphs of +// two or more nodes. As a special case, 0 is returned for graphs of 0 or 1 +// nodes. The value returned can be > 1 for graphs with loops or parallel +// edges. +// +// In-degree centralization can be computed as DegreeCentralization of the +// transpose. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) DegreeCentralization() float64 { + a := g.AdjacencyList + if len(a) <= 1 { + return 0 + } + var max, sum int + for _, to := range a { + if len(to) > max { + max = len(to) + } + sum += len(to) + } + l1 := len(a) - 1 + return float64(len(a)*max-sum) / float64(l1*l1) +} + +// Dominators computes the immediate dominator for each node reachable from +// start. +// +// The slice returned as Dominators.Immediate will have the length of +// g.AdjacencyList. Nodes without a path to end will have a value of -1. +// +// See also the method Doms. Internally Dominators must construct the +// transpose of g and also compute a postordering of a spanning tree of the +// subgraph reachable from start. 
If you happen to have either of these +// computed anyway, it can be more efficient to call Doms directly. +func (g Directed) Dominators(start NI) Dominators { + a := g.AdjacencyList + l := len(a) + // ExampleDoms shows traditional depth-first postorder, but it works to + // generate a reverse preorder. Also breadth-first works instead of + // depth-first and may allow Doms to run a little faster by presenting + // a shallower tree. + post := make([]NI, l) + a.BreadthFirst(start, func(n NI) { + l-- + post[l] = n + }) + tr, _ := g.Transpose() + return g.Doms(tr, post[l:]) +} + +// Doms computes either immediate dominators or postdominators. +// +// The slice returned as Dominators.Immediate will have the length of +// g.AdjacencyList. Nodes without a path to end will have a value of -1. +// +// But see also the simpler methods Dominators and PostDominators. +// +// Doms requires argument tr to be the transpose graph of receiver g, +// and requres argument post to be a post ordering of receiver g. More +// specifically a post ordering of a spanning tree of the subgraph reachable +// from some start node in g. The start node will always be the last node in +// this postordering so it does not need to passed as a separate argument. +// +// Doms can be used to construct either dominators or postdominators. +// To construct dominators on a graph f, generate a postordering p on f +// and call f.Doms(f.Transpose(), p). To construct postdominators, generate +// the transpose t first, then a postordering p on t (not f), and call +// t.Doms(f, p). +// +// Caution: The argument tr is retained in the returned Dominators object +// and is used by the method Dominators.Frontier. It is not deep-copied +// so it is invalid to call Doms, modify the tr graph, and then call Frontier. +func (g Directed) Doms(tr Directed, post []NI) Dominators { + a := g.AdjacencyList + dom := make([]NI, len(a)) + pi := make([]int, len(a)) + for i, n := range post { + pi[n] = i + } + intersect := func(b1, b2 NI) NI { + for b1 != b2 { + for pi[b1] < pi[b2] { + b1 = dom[b1] + } + for pi[b2] < pi[b1] { + b2 = dom[b2] + } + } + return b1 + } + for n := range dom { + dom[n] = -1 + } + start := post[len(post)-1] + dom[start] = start + for changed := false; ; changed = false { + for i := len(post) - 2; i >= 0; i-- { + b := post[i] + var im NI + fr := tr.AdjacencyList[b] + var j int + var fp NI + for j, fp = range fr { + if dom[fp] >= 0 { + im = fp + break + } + } + for _, p := range fr[j:] { + if dom[p] >= 0 { + im = intersect(im, p) + } + } + if dom[b] != im { + dom[b] = im + changed = true + } + } + if !changed { + return Dominators{dom, tr} + } + } +} + +// PostDominators computes the immediate postdominator for each node that can +// reach node end. +// +// The slice returned as Dominators.Immediate will have the length of +// g.AdjacencyList. Nodes without a path to end will have a value of -1. +// +// See also the method Doms. Internally Dominators must construct the +// transpose of g and also compute a postordering of a spanning tree of the +// subgraph of the transpose reachable from end. If you happen to have either +// of these computed anyway, it can be more efficient to call Doms directly. +// +// See the method Doms anyway for the caution note. PostDominators calls +// Doms internally, passing receiver g as Doms argument tr. The caution means +// that it is invalid to call PostDominators, modify the graph g, then call +// Frontier. 
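// Editorial sketch, not part of the vendored patch: computing immediate
// dominators with the Directed.Dominators method above, then reading a
// dominator set and the dominance frontiers. The diamond-shaped graph is an
// illustrative assumption.
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// Arcs: 0->1, 0->2, 1->3, 2->3 (a diamond rooted at node 0).
	g := graph.Directed{AdjacencyList: graph.AdjacencyList{
		0: {1, 2},
		1: {3},
		2: {3},
		3: nil,
	}}
	d := g.Dominators(0)
	fmt.Println(d.Immediate) // [0 0 0 0]: node 0 immediately dominates every node
	fmt.Println(d.Set(3))    // [3 0]: the dominator set of node 3
	f := d.Frontiers()
	fmt.Println(f[1], f[2]) // node 3 appears on the frontier of both 1 and 2
}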
+func (g Directed) PostDominators(end NI) Dominators { + tr, _ := g.Transpose() + a := tr.AdjacencyList + l := len(a) + post := make([]NI, l) + a.BreadthFirst(end, func(n NI) { + l-- + post[l] = n + }) + return tr.Doms(g, post[l:]) +} + +// called from Dominators.Frontier via interface +func (from Directed) domFrontiers(d Dominators) DominanceFrontiers { + im := d.Immediate + f := make(DominanceFrontiers, len(im)) + for i := range f { + if im[i] >= 0 { + f[i] = map[NI]struct{}{} + } + } + for b, fr := range from.AdjacencyList { + if len(fr) < 2 { + continue + } + imb := im[b] + for _, p := range fr { + for runner := p; runner != imb; runner = im[runner] { + f[runner][NI(b)] = struct{}{} + } + } + } + return f +} + +// Eulerian scans a directed graph to determine if it is Eulerian. // -// Receiver g should be connected as a tree or forest. Specifically no node -// can have multiple incoming arcs. If any node n in g has multiple incoming -// arcs, the method returns (nil, n) where n is a node with multiple -// incoming arcs. +// If the graph represents an Eulerian cycle, it returns -1, -1, nil. // -// Otherwise (normally) the method populates the From members in a -// FromList.Path and returns the FromList and -1. +// If the graph does not represent an Eulerian cycle but does represent an +// Eulerian path, it returns the start and end nodes of the path, and nil. // -// Other members of the FromList are left as zero values. -// Use FromList.RecalcLen and FromList.RecalcLeaves as needed. +// Otherwise it returns an error indicating a reason the graph is non-Eulerian. +// Also in this case it returns a relevant node in either start or end. // -// Unusual cases are parallel arcs and loops. A parallel arc represents -// a case of multiple arcs going to some node and so will lead to a (nil, n) -// return, even though a graph might be considered a multigraph tree. -// A single loop on a node that would otherwise be a root node, though, -// is not a case of multiple incoming arcs and so does not force a (nil, n) -// result. +// See also method EulerianStart, which short-circuits when it finds a start +// node whereas this method completely validates a graph as Eulerian. // // There are equivalent labeled and unlabeled versions of this method. -func (g Directed) FromList() (*FromList, NI) { - paths := make([]PathEnd, len(g.AdjacencyList)) - for i := range paths { - paths[i].From = -1 - } - for fr, to := range g.AdjacencyList { - for _, to := range to { - if paths[to].From >= 0 { - return nil, to +func (g Directed) Eulerian() (start, end NI, err error) { + ind := g.InDegree() + start = -1 + end = -1 + for n, to := range g.AdjacencyList { + switch { + case len(to) > ind[n]: + if start >= 0 { + return NI(n), -1, errors.New("multiple start candidates") + } + if len(to) > ind[n]+1 { + return NI(n), -1, errors.New("excessive out-degree") + } + start = NI(n) + case ind[n] > len(to): + if end >= 0 { + return -1, NI(n), errors.New("multiple end candidates") + } + if ind[n] > len(to)+1 { + return -1, NI(n), errors.New("excessive in-degree") } - paths[to].From = NI(fr) + end = NI(n) + } + } + return start, end, nil +} + +// EulerianCycle finds an Eulerian cycle in a directed multigraph. +// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the cycle. 
+// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// Internally, EulerianCycle copies the entire graph g. +// See EulerianCycleD for a more space efficient version. +// +// There are nearly equivalent labeled and unlabeled versions of this method. +// In the labeled version the first element of of the +func (g Directed) EulerianCycle() ([]NI, error) { + c, m := g.Copy() + return c.EulerianCycleD(m) +} + +// EulerianCycleD finds an Eulerian cycle in a directed multigraph. +// +// EulerianCycleD is destructive on its receiver g. See EulerianCycle for +// a non-destructive version. +// +// Argument ma must be the correct arc size, or number of arcs in g. +// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the cycle. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) EulerianCycleD(ma int) ([]NI, error) { + // algorithm adapted from "Sketch of Eulerian Circuit Algorithm" by + // Carl Lee, accessed at http://www.ms.uky.edu/~lee/ma515fa10/euler.pdf. + if g.Order() == 0 { + return nil, nil + } + e := newEulerian(g.AdjacencyList, ma) + e.p[0] = 0 + for e.s >= 0 { + v := e.top() // v is node that starts cycle + e.push() + // if Eulerian, we'll always come back to starting node + if e.top() != v { + return nil, errors.New("not Eulerian") + } + e.keep() + } + if !e.uv.AllZeros() { + return nil, errors.New("not strongly connected") + } + return e.p, nil +} + +// EulerianPath finds an Eulerian path in a directed multigraph. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path, result is an Eulerian path with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the path. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// Internally, EulerianPath copies the entire graph g. +// See EulerianPathD for a more space efficient version. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) EulerianPath() ([]NI, error) { + c, m := g.Copy() + start, err := c.EulerianStart() + if err != nil { + return nil, err + } + if start < 0 { + start = 0 + } + return c.EulerianPathD(m, start) +} + +// EulerianPathD finds an Eulerian path in a directed multigraph. +// +// EulerianPathD is destructive on its receiver g. See EulerianPath for +// a non-destructive version. +// +// Argument ma must be the correct arc size, or number of arcs in g. +// Argument start must be a valid start node for the path. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path starting at start, result is an Eulerian path +// with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the path. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// There are equivalent labeled and unlabeled versions of this method. 
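// Editorial sketch, not part of the vendored patch: a hypothetical call of the
// reworked Directed.EulerianPath. Per the doc comments above, the first
// element of the result is the start node and the remaining elements are the
// arcs of the path. The multigraph below is an illustrative assumption.
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// Arcs: 0->1, 1->2, 2->0, 0->3. Node 0 has out-degree one greater than
	// in-degree and node 3 the reverse, so an Eulerian path runs from 0 to 3.
	g := graph.Directed{AdjacencyList: graph.AdjacencyList{
		0: {1, 3},
		1: {2},
		2: {0},
		3: nil,
	}}
	p, err := g.EulerianPath()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(p) // [0 1 2 0 3]
}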
+func (g Directed) EulerianPathD(ma int, start NI) ([]NI, error) { + if g.Order() == 0 { + return nil, nil + } + e := newEulerian(g.AdjacencyList, ma) + e.p[0] = start + // unlike EulerianCycle, the first path doesn't have to be a cycle. + e.push() + e.keep() + for e.s >= 0 { + start = e.top() + e.push() + // paths after the first must be cycles though + // (as long as there are nodes on the stack) + if e.top() != start { + return nil, errors.New("no Eulerian path") + } + e.keep() + } + if !e.uv.AllZeros() { + return nil, errors.New("no Eulerian path") + } + return e.p, nil +} + +// EulerianStart finds a candidate start node for an Eulerian path. +// +// A candidate start node in the directed case has out-degree one greater then +// in-degree. EulerianStart scans the graph returning immediately with the +// node (and err == nil) when it finds such a candidate. +// +// EulerianStart also returns immediately with an error if it finds the graph +// cannot contain an Eulerian path. In this case it also returns a relevant +// node. +// +// If the scan completes without finding a candidate start node, the graph +// represents an Eulerian cycle. In this case it returns -1, nil, and any +// node can be chosen as a start node for an eulerian path. +// +// See also method Eulerian, which completely validates a graph as Eulerian +// whereas this method short-curcuits when it finds a start node. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) EulerianStart() (start NI, err error) { + ind := g.InDegree() + end := -1 + for n, to := range g.AdjacencyList { + switch { + case len(to) > ind[n]: + if len(to) == ind[n]+1 { + return NI(n), nil // candidate start + } + return -1, errors.New("excessive out-degree") + case ind[n] > len(to): + if end >= 0 { + return NI(n), errors.New("multiple end candidates") + } + if ind[n] > len(to)+1 { + return NI(n), errors.New("excessive in-degree") + } + end = n + } + } + return -1, nil // cycle +} + +type eulerian struct { + g AdjacencyList // working copy of graph, it gets consumed + m int // number of arcs in g, updated as g is consumed + uv bits.Bits // unvisited + // low end of p is stack of unfinished nodes + // high end is finished path + p []NI // stack + path + s int // stack pointer +} + +func newEulerian(g AdjacencyList, m int) *eulerian { + e := &eulerian{ + g: g, + m: m, + uv: bits.New(len(g)), + p: make([]NI, m+1), + } + e.uv.SetAll() + return e +} + +// starting with the node on top of the stack, move nodes with no arcs. +func (e *eulerian) keep() { + for e.s >= 0 { + n := e.top() + if len(e.g[n]) > 0 { + break + } + e.p[e.m] = n + e.s-- + e.m-- + } +} + +func (e *eulerian) top() NI { + return e.p[e.s] +} + +// MaximalNonBranchingPaths finds all paths in a directed graph that are +// "maximal" and "non-branching". +// +// A non-branching path is one where path nodes other than the first and last +// have exactly one arc leading to the node and one arc leading from the node, +// thus there is no possibility to branch away to a different path. +// +// A maximal non-branching path cannot be extended to a longer non-branching +// path by including another node at either end. +// +// In the case of a cyclic non-branching path, the first and last nodes +// of the path will be the same node, indicating an isolated cycle. +// +// The method calls the emit argument for each path or isolated cycle in g, +// as long as emit returns true. If emit returns false, +// MaximalNonBranchingPaths returns immediately. 
+// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) MaximalNonBranchingPaths(emit func([]NI) bool) { + a := g.AdjacencyList + ind := g.InDegree() + uv := bits.New(g.Order()) + uv.SetAll() + for v, vTo := range a { + if !(ind[v] == 1 && len(vTo) == 1) { + for _, w := range vTo { + n := []NI{NI(v), w} + uv.SetBit(v, 0) + uv.SetBit(int(w), 0) + wTo := a[w] + for ind[w] == 1 && len(wTo) == 1 { + u := wTo[0] + n = append(n, u) + uv.SetBit(int(u), 0) + w = u + wTo = a[w] + } + if !emit(n) { // n is a path + return + } + } + } + } + // use uv.From rather than uv.Iterate. + // Iterate doesn't work here because we're modifying uv + for b := uv.OneFrom(0); b >= 0; b = uv.OneFrom(b + 1) { + v := NI(b) + n := []NI{v} + for w := v; ; { + w = a[w][0] + uv.SetBit(int(w), 0) + n = append(n, w) + if w == v { + break + } + } + if !emit(n) { // n is an isolated cycle + return } } - return &FromList{Paths: paths}, -1 } // InDegree computes the in-degree of each node in g // // There are equivalent labeled and unlabeled versions of this method. func (g Directed) InDegree() []int { - ind := make([]int, len(g.AdjacencyList)) + ind := make([]int, g.Order()) for _, nbs := range g.AdjacencyList { for _, nb := range nbs { ind[nb]++ @@ -127,6 +593,128 @@ func (g Directed) InDegree() []int { return ind } +// AddNode maps a node in a supergraph to a subgraph node. +// +// Argument p must be an NI in supergraph s.Super. AddNode panics if +// p is not a valid node index of s.Super. +// +// AddNode is idempotent in that it does not add a new node to the subgraph if +// a subgraph node already exists mapped to supergraph node p. +// +// The mapped subgraph NI is returned. +func (s *DirectedSubgraph) AddNode(p NI) (b NI) { + if int(p) < 0 || int(p) >= s.Super.Order() { + panic(fmt.Sprint("AddNode: NI ", p, " not in supergraph")) + } + if b, ok := s.SubNI[p]; ok { + return b + } + a := s.Directed.AdjacencyList + b = NI(len(a)) + s.Directed.AdjacencyList = append(a, nil) + s.SuperNI = append(s.SuperNI, p) + s.SubNI[p] = b + return +} + +// AddArc adds an arc to a subgraph. +// +// Arguments fr, to must be NIs in supergraph s.Super. As with AddNode, +// AddArc panics if fr and to are not valid node indexes of s.Super. +// +// The arc specfied by fr, to must exist in s.Super. Further, the number of +// parallel arcs in the subgraph cannot exceed the number of corresponding +// parallel arcs in the supergraph. That is, each arc already added to the +// subgraph counts against the arcs available in the supergraph. If a matching +// arc is not available, AddArc returns an error. +// +// If a matching arc is available, subgraph nodes are added as needed, the +// subgraph arc is added, and the method returns nil. +func (s *DirectedSubgraph) AddArc(fr NI, to NI) error { + // verify supergraph NIs first, but without adding subgraph nodes just yet. 
+ if int(fr) < 0 || int(fr) >= s.Super.Order() { + panic(fmt.Sprint("AddArc: NI ", fr, " not in supergraph")) + } + if int(to) < 0 || int(to) >= s.Super.Order() { + panic(fmt.Sprint("AddArc: NI ", to, " not in supergraph")) + } + // count existing matching arcs in subgraph + n := 0 + a := s.Directed.AdjacencyList + if bf, ok := s.SubNI[fr]; ok { + if bt, ok := s.SubNI[to]; ok { + // both NIs already exist in subgraph, need to count arcs + bTo := to + bTo = bt + for _, t := range a[bf] { + if t == bTo { + n++ + } + } + } + } + // verify matching arcs are available in supergraph + for _, t := range (*s.Super).AdjacencyList[fr] { + if t == to { + if n > 0 { + n-- // match existing arc + continue + } + // no more existing arcs need to be matched. nodes can finally + // be added as needed and then the arc can be added. + bf := s.AddNode(fr) + to = s.AddNode(to) + s.Directed.AdjacencyList[bf] = + append(s.Directed.AdjacencyList[bf], to) + return nil // success + } + } + return errors.New("arc not available in supergraph") +} + +// InduceList constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. Argument l must be a list of +// NIs in receiver graph g. Receiver g becomes the supergraph of the induced +// subgraph. +// +// Duplicate NIs are allowed in list l. The duplicates are effectively removed +// and only a single corresponding node is created in the subgraph. Subgraph +// NIs are mapped in the order of list l, execpt for ignoring duplicates. +// NIs in l that are not in g will panic. +// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *Directed) InduceList(l []NI) *DirectedSubgraph { + sub, sup := mapList(l) + return &DirectedSubgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + Directed: Directed{ + g.AdjacencyList.induceArcs(sub, sup), + }} +} + +// InduceBits constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. Argument t must be a bitmap +// representing NIs in receiver graph g. Receiver g becomes the supergraph +// of the induced subgraph. NIs in t that are not in g will panic. +// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *Directed) InduceBits(t bits.Bits) *DirectedSubgraph { + sub, sup := mapBits(t) + return &DirectedSubgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + Directed: Directed{ + g.AdjacencyList.induceArcs(sub, sup), + }} +} + // IsTree identifies trees in directed graphs. // // Return value isTree is true if the subgraph reachable from root is a tree. @@ -136,14 +724,14 @@ func (g Directed) InDegree() []int { // There are equivalent labeled and unlabeled versions of this method. func (g Directed) IsTree(root NI) (isTree, allTree bool) { a := g.AdjacencyList - var v Bits - v.SetAll(len(a)) + v := bits.New(len(a)) + v.SetAll() var df func(NI) bool df = func(n NI) bool { - if v.Bit(n) == 0 { + if v.Bit(int(n)) == 0 { return false } - v.SetBit(n, 0) + v.SetBit(int(n), 0) for _, to := range a[n] { if !df(to) { return false @@ -152,118 +740,151 @@ func (g Directed) IsTree(root NI) (isTree, allTree bool) { return true } isTree = df(root) - return isTree, isTree && v.Zero() + return isTree, isTree && v.AllZeros() +} + +// PageRank computes a significance score for each node of a graph. +// +// The algorithm is credited to Google founders Brin and Lawrence. +// +// Argument d is a damping factor. Reportedly a value of .85 works well. 
+// Argument n is a number of iterations. Reportedly values of 20 to 50 +// work well. +// +// Returned is the PageRank score for each node of g. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) PageRank(d float64, n int) []float64 { + // Following "PageRank Explained" by Ian Rogers, accessed at + // http://www.cs.princeton.edu/~chazelle/courses/BIB/pagerank.htm + a := g.AdjacencyList + p0 := make([]float64, len(a)) + p1 := make([]float64, len(a)) + for i := range p0 { + p0[i] = 1 + } + d1 := 1 - d + for ; n > 0; n-- { + for i := range p1 { + p1[i] = d1 + } + for fr, to := range a { + f := d / float64(len(to)) + for _, to := range to { + p1[to] += p0[fr] * f + } + } + p0, p1 = p1, p0 + } + return p0 } -// Tarjan identifies strongly connected components in a directed graph using -// Tarjan's algorithm. +// StronglyConnectedComponents identifies strongly connected components in +// a directed graph. +// +// The method calls the emit function for each component identified. The +// argument to emit is the node list of a component. The emit function must +// return true for the method to continue identifying components. If emit +// returns false, the method returns immediately. +// +// Note well: The backing slice for the node list passed to emit is reused +// across emit calls. If you need to retain the node list you must copy it. // -// The method calls the emit argument for each component identified. Each -// component is a list of nodes. A property of the algorithm is that -// components are emitted in reverse topological order of the condensation. -// (See https://en.wikipedia.org/wiki/Strongly_connected_component#Definitions -// for description of condensation.) +// The components emitted represent a partition of the nodes in g. +// So for example, if the first component emitted has the same length as g +// then it will be the only component and it means the entire graph g is +// strongly connected. +// +// See also Condensation which returns a condensation graph in addition +// to the strongly connected components. // // There are equivalent labeled and unlabeled versions of this method. // -// See also TarjanForward and TarjanCondensation. -func (g Directed) Tarjan(emit func([]NI) bool) { - // See "Depth-first search and linear graph algorithms", Robert Tarjan, - // SIAM J. Comput. Vol. 1, No. 2, June 1972. - // - // Implementation here from Wikipedia pseudocode, - // http://en.wikipedia.org/w/index.php?title=Tarjan%27s_strongly_connected_components_algorithm&direction=prev&oldid=647184742 - var indexed, stacked Bits +// The algorithm here is by David Pearce. See also alt.SCCPathBased and +// alt.SCCTarjan. +func (g Directed) StronglyConnectedComponents(emit func([]NI) bool) { + // See Algorithm 3 PEA FIND SCC2(V,E) in "An Improved Algorithm for + // Finding the Strongly Connected Components of a Directed Graph" + // by David J. Pearce. 
a := g.AdjacencyList - index := make([]int, len(a)) - lowlink := make([]int, len(a)) - x := 0 - var S []NI - var sc func(NI) bool - sc = func(n NI) bool { - index[n] = x - indexed.SetBit(n, 1) - lowlink[n] = x - x++ - S = append(S, n) - stacked.SetBit(n, 1) - for _, nb := range a[n] { - if indexed.Bit(nb) == 0 { - if !sc(nb) { + rindex := make([]int, len(a)) + var S, scc []NI + index := 1 + c := len(a) - 1 + var visit func(NI) bool + visit = func(v NI) bool { + root := true + rindex[v] = index + index++ + for _, w := range a[v] { + if rindex[w] == 0 { + if !visit(w) { return false } - if lowlink[nb] < lowlink[n] { - lowlink[n] = lowlink[nb] - } - } else if stacked.Bit(nb) == 1 { - if index[nb] < lowlink[n] { - lowlink[n] = index[nb] - } + } + if rindex[w] < rindex[v] { + rindex[v] = rindex[w] + root = false } } - if lowlink[n] == index[n] { - var c []NI - for { - last := len(S) - 1 - w := S[last] - S = S[:last] - stacked.SetBit(w, 0) - c = append(c, w) - if w == n { - if !emit(c) { - return false - } - break - } + if !root { + S = append(S, v) + return true + } + scc = scc[:0] + index-- + for last := len(S) - 1; last >= 0; last-- { + w := S[last] + if rindex[v] > rindex[w] { + break } + S = S[:last] + rindex[w] = c + scc = append(scc, w) + index-- } - return true + rindex[v] = c + c-- + return emit(append(scc, v)) } - for n := range a { - if indexed.Bit(NI(n)) == 0 && !sc(NI(n)) { - return + for v := range a { + if rindex[v] == 0 && !visit(NI(v)) { + break } } } -// TarjanForward returns strongly connected components. +// Condensation returns strongly connected components and their +// condensation graph. +// +// A condensation represents a directed acyclic graph. +// Components are ordered in a reverse topological ordering. +// +// See also StronglyConnectedComponents, which returns the components only. // -// It returns components in the reverse order of Tarjan, for situations -// where a forward topological ordering is easier. -func (g Directed) TarjanForward() [][]NI { - var r [][]NI - g.Tarjan(func(c []NI) bool { - r = append(r, c) +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) Condensation() (scc [][]NI, cd AdjacencyList) { + a := g.AdjacencyList + b := make([]NI, len(a)) // backing slice for scc + g.StronglyConnectedComponents(func(c []NI) bool { + n := copy(b, c) + scc = append(scc, b[:n]) + b = b[n:] return true }) - scc := make([][]NI, len(r)) - last := len(r) - 1 - for i, ci := range r { - scc[last-i] = ci - } - return scc -} - -// TarjanCondensation returns strongly connected components and their -// condensation graph. -// -// Components are ordered in a forward topological ordering. 
-func (g Directed) TarjanCondensation() (scc [][]NI, cd AdjacencyList) { - scc = g.TarjanForward() - cd = make(AdjacencyList, len(scc)) // return value - cond := make([]NI, len(g.AdjacencyList)) // mapping from g node to cd node - for cn := NI(len(scc) - 1); cn >= 0; cn-- { - c := scc[cn] + cd = make(AdjacencyList, len(scc)) // return value + cond := make([]NI, len(a)) // mapping from g node to cd node + for cn, c := range scc { for _, n := range c { cond[n] = NI(cn) // map g node to cd node } - var tos []NI // list of 'to' nodes - var m Bits // tos map + var tos []NI // list of 'to' nodes + m := bits.New(len(cd)) // tos map m.SetBit(cn, 1) for _, n := range c { - for _, to := range g.AdjacencyList[n] { - if ct := cond[to]; m.Bit(ct) == 0 { - m.SetBit(ct, 1) + for _, to := range a[n] { + if ct := cond[to]; m.Bit(int(ct)) == 0 { + m.SetBit(int(ct), 1) tos = append(tos, ct) } } @@ -282,23 +903,35 @@ func (g Directed) TarjanCondensation() (scc [][]NI, cd AdjacencyList) { // // There are equivalent labeled and unlabeled versions of this method. func (g Directed) Topological() (ordering, cycle []NI) { + i := -1 + return g.dfTopo(func() NI { + i++ + if i < g.Order() { + return NI(i) + } + return -1 + }) +} + +func (g Directed) dfTopo(f func() NI) (ordering, cycle []NI) { a := g.AdjacencyList ordering = make([]NI, len(a)) i := len(ordering) - var temp, perm Bits + temp := bits.New(len(a)) + perm := bits.New(len(a)) var cycleFound bool var cycleStart NI var df func(NI) df = func(n NI) { switch { - case temp.Bit(n) == 1: + case temp.Bit(int(n)) == 1: cycleFound = true cycleStart = n return - case perm.Bit(n) == 1: + case perm.Bit(int(n)) == 1: return } - temp.SetBit(n, 1) + temp.SetBit(int(n), 1) for _, nb := range a[n] { df(nb) if cycleFound { @@ -316,21 +949,24 @@ func (g Directed) Topological() (ordering, cycle []NI) { return } } - temp.SetBit(n, 0) - perm.SetBit(n, 1) + temp.SetBit(int(n), 0) + perm.SetBit(int(n), 1) i-- ordering[i] = n } - for n := range a { - if perm.Bit(NI(n)) == 1 { + for { + n := f() + if n < 0 { + return ordering[i:], nil + } + if perm.Bit(int(n)) == 1 { continue } - df(NI(n)) + df(n) if cycleFound { return nil, cycle } } - return ordering, nil } // TopologicalKahn computes a topological ordering of a directed acyclic graph. @@ -349,7 +985,7 @@ func (g Directed) TopologicalKahn(tr Directed) (ordering, cycle []NI) { var L, S []NI // rem for "remaining edges," this function makes a local copy of the // in-degrees and consumes that instead of consuming an input. - rem := make([]int, len(g.AdjacencyList)) + rem := make([]int, g.Order()) for n, fr := range tr.AdjacencyList { if len(fr) == 0 { // accumulate "set of all nodes with no incoming edges" @@ -393,3 +1029,63 @@ func (g Directed) TopologicalKahn(tr Directed) (ordering, cycle []NI) { } return L, nil } + +// TopologicalSubgraph computes a topological ordering of a subgraph of a +// directed acyclic graph. +// +// The subgraph considered is that reachable from the specified node list. +// +// For an acyclic subgraph, return value ordering is a permutation of reachable +// node numbers in topologically sorted order and cycle will be nil. If the +// subgraph is found to be cyclic, ordering will be nil and cycle will be +// the path of a found cycle. +// +// There are equivalent labeled and unlabeled versions of this method. 
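To illustrate the Topological and TopologicalSubgraph methods documented above, here is a minimal sketch, again not part of the vendored change. It assumes the vendor import path shown in this diff and a positional `Directed` literal; the printed ordering is one valid topological order.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// A small DAG: 3 -> 1 -> 0 and 3 -> 2 -> 0.
	g := graph.Directed{graph.AdjacencyList{
		1: {0},
		2: {0},
		3: {1, 2},
	}}
	ordering, cycle := g.Topological()
	fmt.Println(ordering, cycle) // [3 2 1 0] [] (cycle is empty for a DAG)

	// Restrict the ordering to the subgraph reachable from node 1.
	sub, _ := g.TopologicalSubgraph([]graph.NI{1})
	fmt.Println(sub) // [1 0]
}
```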
+func (g Directed) TopologicalSubgraph(nodes []NI) (ordering, cycle []NI) { + i := -1 + return g.dfTopo(func() NI { + i++ + if i < len(nodes) { + return nodes[i] + } + return -1 + }) +} + +// TransitiveClosure returns the transitive closure of directed graph g. +// +// The algorithm is Warren's, which works most naturally with an adjacency +// matrix representation. The returned transitive closure is left in this +// adjacency matrix representation. For a graph g of order n, matrix tc +// is returned as a length n slice of length n bits.Bits values, where +// tc[from].Bit(to) == 1 represents an arc of the transitive closure. +func (g Directed) TransitiveClosure() []bits.Bits { + // construct adjacency matrix + a := g.AdjacencyList + t := make([]bits.Bits, len(a)) + for n := range t { + tn := bits.New(len(a)) + for _, to := range a[n] { + tn.SetBit(int(to), 1) + } + t[n] = tn + } + // above diagonal + for i := 1; i < len(a); i++ { + ti := t[i] + for k := 0; k < i; k++ { + if ti.Bit(k) == 1 { + ti.Or(ti, t[k]) + } + } + } + // below diagonal + for i, ti := range t[:len(a)-1] { + for k := i + 1; k < len(a); k++ { + if ti.Bit(k) == 1 { + ti.Or(ti, t[k]) + } + } + } + return t +} diff --git a/vendor/github.com/soniakeys/graph/dir_cg.go b/vendor/github.com/soniakeys/graph/dir_cg.go index 2b82f4f1..07cc9655 100644 --- a/vendor/github.com/soniakeys/graph/dir_cg.go +++ b/vendor/github.com/soniakeys/graph/dir_cg.go @@ -3,6 +3,13 @@ package graph +import ( + "errors" + "fmt" + + "github.com/soniakeys/bits" +) + // dir_RO.go is code generated from dir_cg.go by directives in graph.go. // Editing dir_cg.go is okay. It is the code generation source. // DO NOT EDIT dir_RO.go. @@ -43,9 +50,10 @@ func (g LabeledDirected) Copy() (c LabeledDirected, ma int) { func (g LabeledDirected) Cyclic() (cyclic bool, fr NI, to Half) { a := g.LabeledAdjacencyList fr, to.To = -1, -1 - var temp, perm Bits - var df func(NI) - df = func(n NI) { + temp := bits.New(len(a)) + perm := bits.New(len(a)) + var df func(int) + df = func(n int) { switch { case temp.Bit(n) == 1: cyclic = true @@ -55,10 +63,10 @@ func (g LabeledDirected) Cyclic() (cyclic bool, fr NI, to Half) { } temp.SetBit(n, 1) for _, nb := range a[n] { - df(nb.To) + df(int(nb.To)) if cyclic { if fr < 0 { - fr, to = n, nb + fr, to = NI(n), nb } return } @@ -67,58 +75,516 @@ func (g LabeledDirected) Cyclic() (cyclic bool, fr NI, to Half) { perm.SetBit(n, 1) } for n := range a { - if perm.Bit(NI(n)) == 1 { + if perm.Bit(n) == 1 { continue } - if df(NI(n)); cyclic { // short circuit as soon as a cycle is found + if df(n); cyclic { // short circuit as soon as a cycle is found break } } return } -// FromList transposes a labeled graph into a FromList. +// DegreeCentralization returns out-degree centralization. +// +// Out-degree of a node is one measure of node centrality and is directly +// available from the adjacency list representation. This allows degree +// centralization for the graph to be very efficiently computed. +// +// The value returned is from 0 to 1 inclusive for simple directed graphs of +// two or more nodes. As a special case, 0 is returned for graphs of 0 or 1 +// nodes. The value returned can be > 1 for graphs with loops or parallel +// edges. +// +// In-degree centralization can be computed as DegreeCentralization of the +// transpose. +// +// There are equivalent labeled and unlabeled versions of this method. 
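The DegreeCentralization doc comment above is easy to ground with two tiny graphs: a star, where all out-degree is concentrated on one node, and a directed cycle, where it is spread evenly. This sketch is illustrative only, uses the labeled variant shown in this hunk, and leaves all labels at their zero value.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// A star: node 0 points at every other node, maximal centralization.
	star := graph.LabeledDirected{graph.LabeledAdjacencyList{
		0: {{To: 1}, {To: 2}, {To: 3}},
		3: nil,
	}}
	fmt.Println(star.DegreeCentralization()) // 1

	// A directed cycle spreads out-degree evenly, centralization 0.
	ring := graph.LabeledDirected{graph.LabeledAdjacencyList{
		0: {{To: 1}}, 1: {{To: 2}}, 2: {{To: 3}}, 3: {{To: 0}},
	}}
	fmt.Println(ring.DegreeCentralization()) // 0
}
```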
+func (g LabeledDirected) DegreeCentralization() float64 { + a := g.LabeledAdjacencyList + if len(a) <= 1 { + return 0 + } + var max, sum int + for _, to := range a { + if len(to) > max { + max = len(to) + } + sum += len(to) + } + l1 := len(a) - 1 + return float64(len(a)*max-sum) / float64(l1*l1) +} + +// Dominators computes the immediate dominator for each node reachable from +// start. +// +// The slice returned as Dominators.Immediate will have the length of +// g.AdjacencyList. Nodes without a path to end will have a value of -1. +// +// See also the method Doms. Internally Dominators must construct the +// transpose of g and also compute a postordering of a spanning tree of the +// subgraph reachable from start. If you happen to have either of these +// computed anyway, it can be more efficient to call Doms directly. +func (g LabeledDirected) Dominators(start NI) Dominators { + a := g.LabeledAdjacencyList + l := len(a) + // ExampleDoms shows traditional depth-first postorder, but it works to + // generate a reverse preorder. Also breadth-first works instead of + // depth-first and may allow Doms to run a little faster by presenting + // a shallower tree. + post := make([]NI, l) + a.BreadthFirst(start, func(n NI) { + l-- + post[l] = n + }) + tr, _ := g.Transpose() + return g.Doms(tr, post[l:]) +} + +// Doms computes either immediate dominators or postdominators. +// +// The slice returned as Dominators.Immediate will have the length of +// g.AdjacencyList. Nodes without a path to end will have a value of -1. +// +// But see also the simpler methods Dominators and PostDominators. +// +// Doms requires argument tr to be the transpose graph of receiver g, +// and requres argument post to be a post ordering of receiver g. More +// specifically a post ordering of a spanning tree of the subgraph reachable +// from some start node in g. The start node will always be the last node in +// this postordering so it does not need to passed as a separate argument. +// +// Doms can be used to construct either dominators or postdominators. +// To construct dominators on a graph f, generate a postordering p on f +// and call f.Doms(f.Transpose(), p). To construct postdominators, generate +// the transpose t first, then a postordering p on t (not f), and call +// t.Doms(f, p). +// +// Caution: The argument tr is retained in the returned Dominators object +// and is used by the method Dominators.Frontier. It is not deep-copied +// so it is invalid to call Doms, modify the tr graph, and then call Frontier. +func (g LabeledDirected) Doms(tr LabeledDirected, post []NI) Dominators { + a := g.LabeledAdjacencyList + dom := make([]NI, len(a)) + pi := make([]int, len(a)) + for i, n := range post { + pi[n] = i + } + intersect := func(b1, b2 NI) NI { + for b1 != b2 { + for pi[b1] < pi[b2] { + b1 = dom[b1] + } + for pi[b2] < pi[b1] { + b2 = dom[b2] + } + } + return b1 + } + for n := range dom { + dom[n] = -1 + } + start := post[len(post)-1] + dom[start] = start + for changed := false; ; changed = false { + for i := len(post) - 2; i >= 0; i-- { + b := post[i] + var im NI + fr := tr.LabeledAdjacencyList[b] + var j int + var fp Half + for j, fp = range fr { + if dom[fp.To] >= 0 { + im = fp.To + break + } + } + for _, p := range fr[j:] { + if dom[p.To] >= 0 { + im = intersect(im, p.To) + } + } + if dom[b] != im { + dom[b] = im + changed = true + } + } + if !changed { + return Dominators{dom, tr} + } + } +} + +// PostDominators computes the immediate postdominator for each node that can +// reach node end. 
+// +// The slice returned as Dominators.Immediate will have the length of +// g.AdjacencyList. Nodes without a path to end will have a value of -1. +// +// See also the method Doms. Internally Dominators must construct the +// transpose of g and also compute a postordering of a spanning tree of the +// subgraph of the transpose reachable from end. If you happen to have either +// of these computed anyway, it can be more efficient to call Doms directly. +// +// See the method Doms anyway for the caution note. PostDominators calls +// Doms internally, passing receiver g as Doms argument tr. The caution means +// that it is invalid to call PostDominators, modify the graph g, then call +// Frontier. +func (g LabeledDirected) PostDominators(end NI) Dominators { + tr, _ := g.Transpose() + a := tr.LabeledAdjacencyList + l := len(a) + post := make([]NI, l) + a.BreadthFirst(end, func(n NI) { + l-- + post[l] = n + }) + return tr.Doms(g, post[l:]) +} + +// called from Dominators.Frontier via interface +func (from LabeledDirected) domFrontiers(d Dominators) DominanceFrontiers { + im := d.Immediate + f := make(DominanceFrontiers, len(im)) + for i := range f { + if im[i] >= 0 { + f[i] = map[NI]struct{}{} + } + } + for b, fr := range from.LabeledAdjacencyList { + if len(fr) < 2 { + continue + } + imb := im[b] + for _, p := range fr { + for runner := p.To; runner != imb; runner = im[runner] { + f[runner][NI(b)] = struct{}{} + } + } + } + return f +} + +// Eulerian scans a directed graph to determine if it is Eulerian. // -// Receiver g should be connected as a tree or forest. Specifically no node -// can have multiple incoming arcs. If any node n in g has multiple incoming -// arcs, the method returns (nil, n) where n is a node with multiple -// incoming arcs. +// If the graph represents an Eulerian cycle, it returns -1, -1, nil. // -// Otherwise (normally) the method populates the From members in a -// FromList.Path and returns the FromList and -1. +// If the graph does not represent an Eulerian cycle but does represent an +// Eulerian path, it returns the start and end nodes of the path, and nil. // -// Other members of the FromList are left as zero values. -// Use FromList.RecalcLen and FromList.RecalcLeaves as needed. +// Otherwise it returns an error indicating a reason the graph is non-Eulerian. +// Also in this case it returns a relevant node in either start or end. // -// Unusual cases are parallel arcs and loops. A parallel arc represents -// a case of multiple arcs going to some node and so will lead to a (nil, n) -// return, even though a graph might be considered a multigraph tree. -// A single loop on a node that would otherwise be a root node, though, -// is not a case of multiple incoming arcs and so does not force a (nil, n) -// result. +// See also method EulerianStart, which short-circuits when it finds a start +// node whereas this method completely validates a graph as Eulerian. // // There are equivalent labeled and unlabeled versions of this method. 
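For the Dominators family added a little earlier in this hunk, a minimal sketch, not part of the vendored change: a diamond-shaped control-flow-like graph where the entry node immediately dominates everything. Labels are unused and left at zero; the printed slice is the Immediate field described in the doc comments above.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// A diamond: 0 -> 1 -> 3 and 0 -> 2 -> 3.
	g := graph.LabeledDirected{graph.LabeledAdjacencyList{
		0: {{To: 1}, {To: 2}},
		1: {{To: 3}},
		2: {{To: 3}},
		3: nil,
	}}
	d := g.Dominators(0)
	// [0 0 0 0]: node 0 immediately dominates every node
	// (the start node is listed as its own immediate dominator).
	fmt.Println(d.Immediate)
}
```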
-func (g LabeledDirected) FromList() (*FromList, NI) { - paths := make([]PathEnd, len(g.LabeledAdjacencyList)) - for i := range paths { - paths[i].From = -1 - } - for fr, to := range g.LabeledAdjacencyList { - for _, to := range to { - if paths[to.To].From >= 0 { - return nil, to.To +func (g LabeledDirected) Eulerian() (start, end NI, err error) { + ind := g.InDegree() + start = -1 + end = -1 + for n, to := range g.LabeledAdjacencyList { + switch { + case len(to) > ind[n]: + if start >= 0 { + return NI(n), -1, errors.New("multiple start candidates") + } + if len(to) > ind[n]+1 { + return NI(n), -1, errors.New("excessive out-degree") + } + start = NI(n) + case ind[n] > len(to): + if end >= 0 { + return -1, NI(n), errors.New("multiple end candidates") + } + if ind[n] > len(to)+1 { + return -1, NI(n), errors.New("excessive in-degree") } - paths[to.To].From = NI(fr) + end = NI(n) + } + } + return start, end, nil +} + +// EulerianCycle finds an Eulerian cycle in a directed multigraph. +// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the cycle. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// Internally, EulerianCycle copies the entire graph g. +// See EulerianCycleD for a more space efficient version. +// +// There are nearly equivalent labeled and unlabeled versions of this method. +// In the labeled version the first element of of the +func (g LabeledDirected) EulerianCycle() ([]Half, error) { + c, m := g.Copy() + return c.EulerianCycleD(m) +} + +// EulerianCycleD finds an Eulerian cycle in a directed multigraph. +// +// EulerianCycleD is destructive on its receiver g. See EulerianCycle for +// a non-destructive version. +// +// Argument ma must be the correct arc size, or number of arcs in g. +// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the cycle. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) EulerianCycleD(ma int) ([]Half, error) { + // algorithm adapted from "Sketch of Eulerian Circuit Algorithm" by + // Carl Lee, accessed at http://www.ms.uky.edu/~lee/ma515fa10/euler.pdf. + if g.Order() == 0 { + return nil, nil + } + e := newLabEulerian(g.LabeledAdjacencyList, ma) + e.p[0] = Half{0, -1} + for e.s >= 0 { + v := e.top() // v is node that starts cycle + e.push() + // if Eulerian, we'll always come back to starting node + if e.top().To != v.To { + return nil, errors.New("not Eulerian") + } + e.keep() + } + if !e.uv.AllZeros() { + return nil, errors.New("not strongly connected") + } + return e.p, nil +} + +// EulerianPath finds an Eulerian path in a directed multigraph. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path, result is an Eulerian path with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the path. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// Internally, EulerianPath copies the entire graph g. 
+// See EulerianPathD for a more space efficient version. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) EulerianPath() ([]Half, error) { + c, m := g.Copy() + start, err := c.EulerianStart() + if err != nil { + return nil, err + } + if start < 0 { + start = 0 + } + return c.EulerianPathD(m, start) +} + +// EulerianPathD finds an Eulerian path in a directed multigraph. +// +// EulerianPathD is destructive on its receiver g. See EulerianPath for +// a non-destructive version. +// +// Argument ma must be the correct arc size, or number of arcs in g. +// Argument start must be a valid start node for the path. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path starting at start, result is an Eulerian path +// with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the path. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) EulerianPathD(ma int, start NI) ([]Half, error) { + if g.Order() == 0 { + return nil, nil + } + e := newLabEulerian(g.LabeledAdjacencyList, ma) + e.p[0] = Half{start, -1} + // unlike EulerianCycle, the first path doesn't have to be a cycle. + e.push() + e.keep() + for e.s >= 0 { + start = e.top().To + e.push() + // paths after the first must be cycles though + // (as long as there are nodes on the stack) + if e.top().To != start { + return nil, errors.New("no Eulerian path") + } + e.keep() + } + if !e.uv.AllZeros() { + return nil, errors.New("no Eulerian path") + } + return e.p, nil +} + +// EulerianStart finds a candidate start node for an Eulerian path. +// +// A candidate start node in the directed case has out-degree one greater then +// in-degree. EulerianStart scans the graph returning immediately with the +// node (and err == nil) when it finds such a candidate. +// +// EulerianStart also returns immediately with an error if it finds the graph +// cannot contain an Eulerian path. In this case it also returns a relevant +// node. +// +// If the scan completes without finding a candidate start node, the graph +// represents an Eulerian cycle. In this case it returns -1, nil, and any +// node can be chosen as a start node for an eulerian path. +// +// See also method Eulerian, which completely validates a graph as Eulerian +// whereas this method short-curcuits when it finds a start node. +// +// There are equivalent labeled and unlabeled versions of this method. 
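EulerianPath relies on EulerianStart internally, as the code above shows. The sketch below, illustrative only and not part of the vendored change, builds a labeled multigraph whose only Eulerian trail runs 0, 1, 2, 0, 3; note the -1 placeholder label on the first element, matching the `Half{start, -1}` seeding in EulerianPathD.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// Node 0 has out-degree one greater than in-degree, so any Eulerian
	// path must start at 0 and end at 3.
	g := graph.LabeledDirected{graph.LabeledAdjacencyList{
		0: {{To: 1, Label: 10}, {To: 3, Label: 40}},
		1: {{To: 2, Label: 20}},
		2: {{To: 0, Label: 30}},
		3: nil,
	}}
	p, err := g.EulerianPath()
	fmt.Println(p, err)
	// expected: [{0 -1} {1 10} {2 20} {0 30} {3 40}] <nil>
}
```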
+func (g LabeledDirected) EulerianStart() (start NI, err error) { + ind := g.InDegree() + end := -1 + for n, to := range g.LabeledAdjacencyList { + switch { + case len(to) > ind[n]: + if len(to) == ind[n]+1 { + return NI(n), nil // candidate start + } + return -1, errors.New("excessive out-degree") + case ind[n] > len(to): + if end >= 0 { + return NI(n), errors.New("multiple end candidates") + } + if ind[n] > len(to)+1 { + return NI(n), errors.New("excessive in-degree") + } + end = n + } + } + return -1, nil // cycle +} + +type labEulerian struct { + g LabeledAdjacencyList // working copy of graph, it gets consumed + m int // number of arcs in g, updated as g is consumed + uv bits.Bits // unvisited + // low end of p is stack of unfinished nodes + // high end is finished path + p []Half // stack + path + s int // stack pointer +} + +func newLabEulerian(g LabeledAdjacencyList, m int) *labEulerian { + e := &labEulerian{ + g: g, + m: m, + uv: bits.New(len(g)), + p: make([]Half, m+1), + } + e.uv.SetAll() + return e +} + +// starting with the node on top of the stack, move nodes with no arcs. +func (e *labEulerian) keep() { + for e.s >= 0 { + n := e.top() + if len(e.g[n.To]) > 0 { + break + } + e.p[e.m] = n + e.s-- + e.m-- + } +} + +func (e *labEulerian) top() Half { + return e.p[e.s] +} + +// MaximalNonBranchingPaths finds all paths in a directed graph that are +// "maximal" and "non-branching". +// +// A non-branching path is one where path nodes other than the first and last +// have exactly one arc leading to the node and one arc leading from the node, +// thus there is no possibility to branch away to a different path. +// +// A maximal non-branching path cannot be extended to a longer non-branching +// path by including another node at either end. +// +// In the case of a cyclic non-branching path, the first and last nodes +// of the path will be the same node, indicating an isolated cycle. +// +// The method calls the emit argument for each path or isolated cycle in g, +// as long as emit returns true. If emit returns false, +// MaximalNonBranchingPaths returns immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) MaximalNonBranchingPaths(emit func([]Half) bool) { + a := g.LabeledAdjacencyList + ind := g.InDegree() + uv := bits.New(g.Order()) + uv.SetAll() + for v, vTo := range a { + if !(ind[v] == 1 && len(vTo) == 1) { + for _, w := range vTo { + n := []Half{Half{NI(v), -1}, w} + uv.SetBit(v, 0) + uv.SetBit(int(w.To), 0) + wTo := a[w.To] + for ind[w.To] == 1 && len(wTo) == 1 { + u := wTo[0] + n = append(n, u) + uv.SetBit(int(u.To), 0) + w = u + wTo = a[w.To] + } + if !emit(n) { // n is a path + return + } + } + } + } + // use uv.From rather than uv.Iterate. + // Iterate doesn't work here because we're modifying uv + for b := uv.OneFrom(0); b >= 0; b = uv.OneFrom(b + 1) { + v := Half{NI(b), -1} + n := []Half{v} + for w := v; ; { + w = a[w.To][0] + uv.SetBit(int(w.To), 0) + n = append(n, w) + if w.To == v.To { + break + } + } + if !emit(n) { // n is an isolated cycle + return } } - return &FromList{Paths: paths}, -1 } // InDegree computes the in-degree of each node in g // // There are equivalent labeled and unlabeled versions of this method. 
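The MaximalNonBranchingPaths behavior described above (paths plus isolated cycles) can be seen on a small graph. The sketch uses the unlabeled variant added in dir_RO.go earlier in this diff; the labeled variant here behaves the same with Half elements. Illustrative only; the emitted slices and their order are the expected output for this input.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// 0 -> 1 -> 2, then 2 branches to 3 and to 4; nodes 5 and 6 form an
	// isolated cycle.
	g := graph.Directed{graph.AdjacencyList{
		0: {1},
		1: {2},
		2: {3, 4},
		5: {6},
		6: {5},
	}}
	g.MaximalNonBranchingPaths(func(p []graph.NI) bool {
		fmt.Println(p)
		return true // keep emitting
	})
	// expected: [0 1 2], [2 3], [2 4], and the isolated cycle [5 6 5]
}
```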
func (g LabeledDirected) InDegree() []int { - ind := make([]int, len(g.LabeledAdjacencyList)) + ind := make([]int, g.Order()) for _, nbs := range g.LabeledAdjacencyList { for _, nb := range nbs { ind[nb.To]++ @@ -127,6 +593,128 @@ func (g LabeledDirected) InDegree() []int { return ind } +// AddNode maps a node in a supergraph to a subgraph node. +// +// Argument p must be an NI in supergraph s.Super. AddNode panics if +// p is not a valid node index of s.Super. +// +// AddNode is idempotent in that it does not add a new node to the subgraph if +// a subgraph node already exists mapped to supergraph node p. +// +// The mapped subgraph NI is returned. +func (s *LabeledDirectedSubgraph) AddNode(p NI) (b NI) { + if int(p) < 0 || int(p) >= s.Super.Order() { + panic(fmt.Sprint("AddNode: NI ", p, " not in supergraph")) + } + if b, ok := s.SubNI[p]; ok { + return b + } + a := s.LabeledDirected.LabeledAdjacencyList + b = NI(len(a)) + s.LabeledDirected.LabeledAdjacencyList = append(a, nil) + s.SuperNI = append(s.SuperNI, p) + s.SubNI[p] = b + return +} + +// AddArc adds an arc to a subgraph. +// +// Arguments fr, to must be NIs in supergraph s.Super. As with AddNode, +// AddArc panics if fr and to are not valid node indexes of s.Super. +// +// The arc specfied by fr, to must exist in s.Super. Further, the number of +// parallel arcs in the subgraph cannot exceed the number of corresponding +// parallel arcs in the supergraph. That is, each arc already added to the +// subgraph counts against the arcs available in the supergraph. If a matching +// arc is not available, AddArc returns an error. +// +// If a matching arc is available, subgraph nodes are added as needed, the +// subgraph arc is added, and the method returns nil. +func (s *LabeledDirectedSubgraph) AddArc(fr NI, to Half) error { + // verify supergraph NIs first, but without adding subgraph nodes just yet. + if int(fr) < 0 || int(fr) >= s.Super.Order() { + panic(fmt.Sprint("AddArc: NI ", fr, " not in supergraph")) + } + if int(to.To) < 0 || int(to.To) >= s.Super.Order() { + panic(fmt.Sprint("AddArc: NI ", to.To, " not in supergraph")) + } + // count existing matching arcs in subgraph + n := 0 + a := s.LabeledDirected.LabeledAdjacencyList + if bf, ok := s.SubNI[fr]; ok { + if bt, ok := s.SubNI[to.To]; ok { + // both NIs already exist in subgraph, need to count arcs + bTo := to + bTo.To = bt + for _, t := range a[bf] { + if t == bTo { + n++ + } + } + } + } + // verify matching arcs are available in supergraph + for _, t := range (*s.Super).LabeledAdjacencyList[fr] { + if t == to { + if n > 0 { + n-- // match existing arc + continue + } + // no more existing arcs need to be matched. nodes can finally + // be added as needed and then the arc can be added. + bf := s.AddNode(fr) + to.To = s.AddNode(to.To) + s.LabeledDirected.LabeledAdjacencyList[bf] = + append(s.LabeledDirected.LabeledAdjacencyList[bf], to) + return nil // success + } + } + return errors.New("arc not available in supergraph") +} + +// InduceList constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. Argument l must be a list of +// NIs in receiver graph g. Receiver g becomes the supergraph of the induced +// subgraph. +// +// Duplicate NIs are allowed in list l. The duplicates are effectively removed +// and only a single corresponding node is created in the subgraph. Subgraph +// NIs are mapped in the order of list l, execpt for ignoring duplicates. +// NIs in l that are not in g will panic. 
+// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *LabeledDirected) InduceList(l []NI) *LabeledDirectedSubgraph { + sub, sup := mapList(l) + return &LabeledDirectedSubgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + LabeledDirected: LabeledDirected{ + g.LabeledAdjacencyList.induceArcs(sub, sup), + }} +} + +// InduceBits constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. Argument t must be a bitmap +// representing NIs in receiver graph g. Receiver g becomes the supergraph +// of the induced subgraph. NIs in t that are not in g will panic. +// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *LabeledDirected) InduceBits(t bits.Bits) *LabeledDirectedSubgraph { + sub, sup := mapBits(t) + return &LabeledDirectedSubgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + LabeledDirected: LabeledDirected{ + g.LabeledAdjacencyList.induceArcs(sub, sup), + }} +} + // IsTree identifies trees in directed graphs. // // Return value isTree is true if the subgraph reachable from root is a tree. @@ -136,14 +724,14 @@ func (g LabeledDirected) InDegree() []int { // There are equivalent labeled and unlabeled versions of this method. func (g LabeledDirected) IsTree(root NI) (isTree, allTree bool) { a := g.LabeledAdjacencyList - var v Bits - v.SetAll(len(a)) + v := bits.New(len(a)) + v.SetAll() var df func(NI) bool df = func(n NI) bool { - if v.Bit(n) == 0 { + if v.Bit(int(n)) == 0 { return false } - v.SetBit(n, 0) + v.SetBit(int(n), 0) for _, to := range a[n] { if !df(to.To) { return false @@ -152,118 +740,151 @@ func (g LabeledDirected) IsTree(root NI) (isTree, allTree bool) { return true } isTree = df(root) - return isTree, isTree && v.Zero() + return isTree, isTree && v.AllZeros() +} + +// PageRank computes a significance score for each node of a graph. +// +// The algorithm is credited to Google founders Brin and Lawrence. +// +// Argument d is a damping factor. Reportedly a value of .85 works well. +// Argument n is a number of iterations. Reportedly values of 20 to 50 +// work well. +// +// Returned is the PageRank score for each node of g. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) PageRank(d float64, n int) []float64 { + // Following "PageRank Explained" by Ian Rogers, accessed at + // http://www.cs.princeton.edu/~chazelle/courses/BIB/pagerank.htm + a := g.LabeledAdjacencyList + p0 := make([]float64, len(a)) + p1 := make([]float64, len(a)) + for i := range p0 { + p0[i] = 1 + } + d1 := 1 - d + for ; n > 0; n-- { + for i := range p1 { + p1[i] = d1 + } + for fr, to := range a { + f := d / float64(len(to)) + for _, to := range to { + p1[to.To] += p0[fr] * f + } + } + p0, p1 = p1, p0 + } + return p0 } -// Tarjan identifies strongly connected components in a directed graph using -// Tarjan's algorithm. +// StronglyConnectedComponents identifies strongly connected components in +// a directed graph. +// +// The method calls the emit function for each component identified. The +// argument to emit is the node list of a component. The emit function must +// return true for the method to continue identifying components. If emit +// returns false, the method returns immediately. +// +// Note well: The backing slice for the node list passed to emit is reused +// across emit calls. If you need to retain the node list you must copy it. 
// -// The method calls the emit argument for each component identified. Each -// component is a list of nodes. A property of the algorithm is that -// components are emitted in reverse topological order of the condensation. -// (See https://en.wikipedia.org/wiki/Strongly_connected_component#Definitions -// for description of condensation.) +// The components emitted represent a partition of the nodes in g. +// So for example, if the first component emitted has the same length as g +// then it will be the only component and it means the entire graph g is +// strongly connected. +// +// See also Condensation which returns a condensation graph in addition +// to the strongly connected components. // // There are equivalent labeled and unlabeled versions of this method. // -// See also TarjanForward and TarjanCondensation. -func (g LabeledDirected) Tarjan(emit func([]NI) bool) { - // See "Depth-first search and linear graph algorithms", Robert Tarjan, - // SIAM J. Comput. Vol. 1, No. 2, June 1972. - // - // Implementation here from Wikipedia pseudocode, - // http://en.wikipedia.org/w/index.php?title=Tarjan%27s_strongly_connected_components_algorithm&direction=prev&oldid=647184742 - var indexed, stacked Bits +// The algorithm here is by David Pearce. See also alt.SCCPathBased and +// alt.SCCTarjan. +func (g LabeledDirected) StronglyConnectedComponents(emit func([]NI) bool) { + // See Algorithm 3 PEA FIND SCC2(V,E) in "An Improved Algorithm for + // Finding the Strongly Connected Components of a Directed Graph" + // by David J. Pearce. a := g.LabeledAdjacencyList - index := make([]int, len(a)) - lowlink := make([]int, len(a)) - x := 0 - var S []NI - var sc func(NI) bool - sc = func(n NI) bool { - index[n] = x - indexed.SetBit(n, 1) - lowlink[n] = x - x++ - S = append(S, n) - stacked.SetBit(n, 1) - for _, nb := range a[n] { - if indexed.Bit(nb.To) == 0 { - if !sc(nb.To) { + rindex := make([]int, len(a)) + var S, scc []NI + index := 1 + c := len(a) - 1 + var visit func(NI) bool + visit = func(v NI) bool { + root := true + rindex[v] = index + index++ + for _, w := range a[v] { + if rindex[w.To] == 0 { + if !visit(w.To) { return false } - if lowlink[nb.To] < lowlink[n] { - lowlink[n] = lowlink[nb.To] - } - } else if stacked.Bit(nb.To) == 1 { - if index[nb.To] < lowlink[n] { - lowlink[n] = index[nb.To] - } + } + if rindex[w.To] < rindex[v] { + rindex[v] = rindex[w.To] + root = false } } - if lowlink[n] == index[n] { - var c []NI - for { - last := len(S) - 1 - w := S[last] - S = S[:last] - stacked.SetBit(w, 0) - c = append(c, w) - if w == n { - if !emit(c) { - return false - } - break - } + if !root { + S = append(S, v) + return true + } + scc = scc[:0] + index-- + for last := len(S) - 1; last >= 0; last-- { + w := S[last] + if rindex[v] > rindex[w] { + break } + S = S[:last] + rindex[w] = c + scc = append(scc, w) + index-- } - return true + rindex[v] = c + c-- + return emit(append(scc, v)) } - for n := range a { - if indexed.Bit(NI(n)) == 0 && !sc(NI(n)) { - return + for v := range a { + if rindex[v] == 0 && !visit(NI(v)) { + break } } } -// TarjanForward returns strongly connected components. +// Condensation returns strongly connected components and their +// condensation graph. +// +// A condensation represents a directed acyclic graph. +// Components are ordered in a reverse topological ordering. +// +// See also StronglyConnectedComponents, which returns the components only. 
// -// It returns components in the reverse order of Tarjan, for situations -// where a forward topological ordering is easier. -func (g LabeledDirected) TarjanForward() [][]NI { - var r [][]NI - g.Tarjan(func(c []NI) bool { - r = append(r, c) +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) Condensation() (scc [][]NI, cd AdjacencyList) { + a := g.LabeledAdjacencyList + b := make([]NI, len(a)) // backing slice for scc + g.StronglyConnectedComponents(func(c []NI) bool { + n := copy(b, c) + scc = append(scc, b[:n]) + b = b[n:] return true }) - scc := make([][]NI, len(r)) - last := len(r) - 1 - for i, ci := range r { - scc[last-i] = ci - } - return scc -} - -// TarjanCondensation returns strongly connected components and their -// condensation graph. -// -// Components are ordered in a forward topological ordering. -func (g LabeledDirected) TarjanCondensation() (scc [][]NI, cd AdjacencyList) { - scc = g.TarjanForward() - cd = make(AdjacencyList, len(scc)) // return value - cond := make([]NI, len(g.LabeledAdjacencyList)) // mapping from g node to cd node - for cn := NI(len(scc) - 1); cn >= 0; cn-- { - c := scc[cn] + cd = make(AdjacencyList, len(scc)) // return value + cond := make([]NI, len(a)) // mapping from g node to cd node + for cn, c := range scc { for _, n := range c { cond[n] = NI(cn) // map g node to cd node } - var tos []NI // list of 'to' nodes - var m Bits // tos map + var tos []NI // list of 'to' nodes + m := bits.New(len(cd)) // tos map m.SetBit(cn, 1) for _, n := range c { - for _, to := range g.LabeledAdjacencyList[n] { - if ct := cond[to.To]; m.Bit(ct) == 0 { - m.SetBit(ct, 1) + for _, to := range a[n] { + if ct := cond[to.To]; m.Bit(int(ct)) == 0 { + m.SetBit(int(ct), 1) tos = append(tos, ct) } } @@ -282,23 +903,35 @@ func (g LabeledDirected) TarjanCondensation() (scc [][]NI, cd AdjacencyList) { // // There are equivalent labeled and unlabeled versions of this method. func (g LabeledDirected) Topological() (ordering, cycle []NI) { + i := -1 + return g.dfTopo(func() NI { + i++ + if i < g.Order() { + return NI(i) + } + return -1 + }) +} + +func (g LabeledDirected) dfTopo(f func() NI) (ordering, cycle []NI) { a := g.LabeledAdjacencyList ordering = make([]NI, len(a)) i := len(ordering) - var temp, perm Bits + temp := bits.New(len(a)) + perm := bits.New(len(a)) var cycleFound bool var cycleStart NI var df func(NI) df = func(n NI) { switch { - case temp.Bit(n) == 1: + case temp.Bit(int(n)) == 1: cycleFound = true cycleStart = n return - case perm.Bit(n) == 1: + case perm.Bit(int(n)) == 1: return } - temp.SetBit(n, 1) + temp.SetBit(int(n), 1) for _, nb := range a[n] { df(nb.To) if cycleFound { @@ -316,21 +949,24 @@ func (g LabeledDirected) Topological() (ordering, cycle []NI) { return } } - temp.SetBit(n, 0) - perm.SetBit(n, 1) + temp.SetBit(int(n), 0) + perm.SetBit(int(n), 1) i-- ordering[i] = n } - for n := range a { - if perm.Bit(NI(n)) == 1 { + for { + n := f() + if n < 0 { + return ordering[i:], nil + } + if perm.Bit(int(n)) == 1 { continue } - df(NI(n)) + df(n) if cycleFound { return nil, cycle } } - return ordering, nil } // TopologicalKahn computes a topological ordering of a directed acyclic graph. @@ -349,7 +985,7 @@ func (g LabeledDirected) TopologicalKahn(tr Directed) (ordering, cycle []NI) { var L, S []NI // rem for "remaining edges," this function makes a local copy of the // in-degrees and consumes that instead of consuming an input. 
- rem := make([]int, len(g.LabeledAdjacencyList)) + rem := make([]int, g.Order()) for n, fr := range tr.AdjacencyList { if len(fr) == 0 { // accumulate "set of all nodes with no incoming edges" @@ -393,3 +1029,63 @@ func (g LabeledDirected) TopologicalKahn(tr Directed) (ordering, cycle []NI) { } return L, nil } + +// TopologicalSubgraph computes a topological ordering of a subgraph of a +// directed acyclic graph. +// +// The subgraph considered is that reachable from the specified node list. +// +// For an acyclic subgraph, return value ordering is a permutation of reachable +// node numbers in topologically sorted order and cycle will be nil. If the +// subgraph is found to be cyclic, ordering will be nil and cycle will be +// the path of a found cycle. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) TopologicalSubgraph(nodes []NI) (ordering, cycle []NI) { + i := -1 + return g.dfTopo(func() NI { + i++ + if i < len(nodes) { + return nodes[i] + } + return -1 + }) +} + +// TransitiveClosure returns the transitive closure of directed graph g. +// +// The algorithm is Warren's, which works most naturally with an adjacency +// matrix representation. The returned transitive closure is left in this +// adjacency matrix representation. For a graph g of order n, matrix tc +// is returned as a length n slice of length n bits.Bits values, where +// tc[from].Bit(to) == 1 represents an arc of the transitive closure. +func (g LabeledDirected) TransitiveClosure() []bits.Bits { + // construct adjacency matrix + a := g.LabeledAdjacencyList + t := make([]bits.Bits, len(a)) + for n := range t { + tn := bits.New(len(a)) + for _, to := range a[n] { + tn.SetBit(int(to.To), 1) + } + t[n] = tn + } + // above diagonal + for i := 1; i < len(a); i++ { + ti := t[i] + for k := 0; k < i; k++ { + if ti.Bit(k) == 1 { + ti.Or(ti, t[k]) + } + } + } + // below diagonal + for i, ti := range t[:len(a)-1] { + for k := i + 1; k < len(a); k++ { + if ti.Bit(k) == 1 { + ti.Or(ti, t[k]) + } + } + } + return t +} diff --git a/vendor/github.com/soniakeys/graph/doc.go b/vendor/github.com/soniakeys/graph/doc.go index 6d072789..a30b5897 100644 --- a/vendor/github.com/soniakeys/graph/doc.go +++ b/vendor/github.com/soniakeys/graph/doc.go @@ -4,7 +4,7 @@ // Graph algorithms: Dijkstra, A*, Bellman Ford, Floyd Warshall; // Kruskal and Prim minimal spanning tree; topological sort and DAG longest // and shortest paths; Eulerian cycle and path; degeneracy and k-cores; -// Bron Kerbosch clique finding; connected components; and others. +// Bron Kerbosch clique finding; connected components; dominance; and others. // // This is a graph library of integer indexes. To use it with application // data, you associate data with integer indexes, perform searches or other @@ -44,8 +44,8 @@ // // In contrast to Half, the type Edge represents both ends of an edge (but // no label.) The type LabeledEdge adds the label. The type WeightedEdgeList -// bundles a list of LabeledEdges with a WeightFunc. WeightedEdgeList is -// currently only used by Kruskal methods. +// bundles a list of LabeledEdges with a WeightFunc. (WeightedEdgeList has +// few methods. It exists primarily to support the Kruskal algorithm.) // // FromList is a compact rooted tree (or forest) respresentation. Like // AdjacencyList and LabeledAdjacencyList, it is a list with one element for @@ -59,8 +59,7 @@ // simply by ignoring the label. In these cases code generation provides // methods on both types from a single source implementation. 
These methods // are documented with the sentence "There are equivalent labeled and unlabeled -// versions of this method" and examples are provided only for the unlabeled -// version. +// versions of this method." // // Terminology // @@ -114,15 +113,10 @@ // distance with the same minimum length, search methods are free to return // any of them. // -// Type name Description, methods -// BreadthFirst Unweigted arcs, traversal, single path search or all paths. -// BreadthFirst2 Direction-optimizing variant of BreadthFirst. +// Algorithm Description // Dijkstra Non-negative arc weights, single or all paths. // AStar Non-negative arc weights, heuristic guided, single path. // BellmanFord Negative arc weights allowed, no negative cycles, all paths. // DAGPath O(n) algorithm for DAGs, arc weights of any sign. // FloydWarshall all pairs distances, no negative cycles. -// -// These searches typically have one method that is full-featured and -// then a convenience method with a simpler API targeting a simpler use case. package graph diff --git a/vendor/github.com/soniakeys/graph/fromlist.go b/vendor/github.com/soniakeys/graph/fromlist.go index 31d41fa1..ce8c27f4 100644 --- a/vendor/github.com/soniakeys/graph/fromlist.go +++ b/vendor/github.com/soniakeys/graph/fromlist.go @@ -3,6 +3,8 @@ package graph +import "github.com/soniakeys/bits" + // FromList represents a rooted tree (or forest) where each node is associated // with a half arc identifying an arc "from" another node. // @@ -34,7 +36,7 @@ package graph // useful to validate the acyclic property. type FromList struct { Paths []PathEnd // tree representation - Leaves Bits // leaves of tree + Leaves bits.Bits // leaves of tree MaxLen int // length of longest path, max of all PathEnd.Len values } @@ -46,10 +48,17 @@ type PathEnd struct { Len int // number of nodes in path from start } +/* NewFromList could be confusing now with bits also needing allocation. +maybe best to not have this function. Maybe a more useful new would be +one that took a PathEnd slice and intitialized everything including roots +and max len. Maybe its time for a separate []PathEnd type when that's +all that's needed. (and reconsider the name PathEnd) +*/ + // NewFromList creates a FromList object of given order. // -// The Paths member is allocated to length n but there is no other -// initialization. +// The Paths member is allocated to the specified order n but other members +// are left as zero values. func NewFromList(n int) FromList { return FromList{Paths: make([]PathEnd, n)} } @@ -106,18 +115,18 @@ func (f FromList) CommonStart(a, b NI) NI { // Note that the bool is not an "ok" return. A cyclic FromList is usually // not okay. func (f FromList) Cyclic() (cyclic bool, n NI) { - var vis Bits p := f.Paths + vis := bits.New(len(p)) for i := range p { - var path Bits - for n := NI(i); vis.Bit(n) == 0; { + path := bits.New(len(p)) + for n := i; vis.Bit(n) == 0; { vis.SetBit(n, 1) path.SetBit(n, 1) - if n = p[n].From; n < 0 { + if n = int(p[n].From); n < 0 { break } if path.Bit(n) == 1 { - return true, n + return true, NI(n) } } } @@ -127,13 +136,14 @@ func (f FromList) Cyclic() (cyclic bool, n NI) { // IsolatedNodeBits returns a bitmap of isolated nodes in receiver graph f. // // An isolated node is one with no arcs going to or from it. 
-func (f FromList) IsolatedNodes() (iso Bits) { +func (f FromList) IsolatedNodes() (iso bits.Bits) { p := f.Paths - iso.SetAll(len(p)) + iso = bits.New(len(p)) + iso.SetAll() for n, e := range p { if e.From >= 0 { - iso.SetBit(NI(n), 0) - iso.SetBit(e.From, 0) + iso.SetBit(n, 0) + iso.SetBit(int(e.From), 0) } } return @@ -170,7 +180,7 @@ func (f FromList) PathTo(end NI, p []NI) []NI { func PathTo(paths []PathEnd, end NI, p []NI) []NI { n := paths[end].Len if n == 0 { - return nil + return p[:0] } if cap(p) >= n { p = p[:n] @@ -187,7 +197,39 @@ func PathTo(paths []PathEnd, end NI, p []NI) []NI { } } -// Preorder traverses f calling Visitor v in preorder. +// PathToLabeled decodes a FromList, recovering a single path. +// +// The start of the returned path will be a root node of the FromList. +// +// Only the Paths member of the receiver is used. Other members of the +// FromList do not need to be valid, however the MaxLen member can be useful +// for allocating argument p. +// +// Argument p can provide the result slice. If p has capacity for the result +// it will be used, otherwise a new slice is created for the result. +// +// See also function PathTo. +func (f FromList) PathToLabeled(end NI, labels []LI, p []Half) LabeledPath { + n := f.Paths[end].Len - 1 + if n <= 0 { + return LabeledPath{end, p[:0]} + } + if cap(p) >= n { + p = p[:n] + } else { + p = make([]Half, n) + } + for { + n-- + p[n] = Half{To: end, Label: labels[end]} + end = f.Paths[end].From + if n == 0 { + return LabeledPath{end, p} + } + } +} + +// Preorder traverses a FromList in preorder. // // Nodes are visited in order such that for any node n with from node fr, // fr is visited before n. Where f represents a tree, the visit ordering @@ -198,16 +240,17 @@ func PathTo(paths []PathEnd, end NI, p []NI) []NI { // Leaves must be set correctly first. Use RecalcLeaves if leaves are not // known to be set correctly. FromList f cannot be cyclic. // -// Traversal continues while v returns true. It terminates if v returns false. -// Preorder returns true if it completes without v returning false. Preorder -// returns false if traversal is terminated by v returning false. -func (f FromList) Preorder(v OkNodeVisitor) bool { +// Traversal continues while visitor function v returns true. It terminates +// if v returns false. Preorder returns true if it completes without v +// returning false. Preorder returns false if traversal is terminated by v +// returning false. +func (f FromList) Preorder(v func(NI) bool) bool { p := f.Paths - var done Bits + done := bits.New(len(p)) var df func(NI) bool df = func(n NI) bool { - done.SetBit(n, 1) - if fr := p[n].From; fr >= 0 && done.Bit(fr) == 0 { + done.SetBit(int(n), 1) + if fr := p[n].From; fr >= 0 && done.Bit(int(fr)) == 0 { df(fr) } return v(n) @@ -215,8 +258,8 @@ func (f FromList) Preorder(v OkNodeVisitor) bool { for n := range f.Paths { p[n].Len = 0 } - return f.Leaves.Iterate(func(n NI) bool { - return df(n) + return f.Leaves.IterateOnes(func(n int) bool { + return df(NI(n)) }) } @@ -224,10 +267,13 @@ func (f FromList) Preorder(v OkNodeVisitor) bool { func (f *FromList) RecalcLeaves() { p := f.Paths lv := &f.Leaves - lv.SetAll(len(p)) + if lv.Num != len(p) { + *lv = bits.New(len(p)) + } + lv.SetAll() for n := range f.Paths { if fr := p[n].From; fr >= 0 { - lv.SetBit(fr, 0) + lv.SetBit(int(fr), 0) } } } @@ -236,6 +282,9 @@ func (f *FromList) RecalcLeaves() { // // RecalcLen relies on the Leaves member being valid. 
If it is not known // to be valid, call RecalcLeaves before calling RecalcLen. +// +// RecalcLen will panic if the FromList is cyclic. Use the Cyclic method +// if needed to verify that the FromList is acyclic. func (f *FromList) RecalcLen() { p := f.Paths var setLen func(NI) int @@ -255,7 +304,7 @@ func (f *FromList) RecalcLen() { p[n].Len = 0 } f.MaxLen = 0 - f.Leaves.Iterate(func(n NI) bool { + f.Leaves.IterateOnes(func(n int) bool { if l := setLen(NI(n)); l > f.MaxLen { f.MaxLen = l } @@ -301,75 +350,59 @@ func (f FromList) Root(n NI) NI { // Transpose constructs the directed graph corresponding to FromList f // but with arcs in the opposite direction. That is, from roots toward leaves. // +// If non-nil argrument roots is passed, Transpose populates it as roots of +// the resulting forest and returns nRoots as a count of the roots. +// // The method relies only on the From member of f.Paths. Other members of // the FromList are not used. -// -// See FromList.TransposeRoots for a version that also accumulates and returns -// information about the roots. -func (f FromList) Transpose() Directed { - g := make(AdjacencyList, len(f.Paths)) - for n, p := range f.Paths { - if p.From == -1 { - continue +func (f FromList) Transpose(roots *bits.Bits) (forest Directed, nRoots int) { + p := f.Paths + g := make(AdjacencyList, len(p)) + if roots != nil { + nRoots = len(p) + if roots.Num != nRoots { + *roots = bits.New(nRoots) } - g[p.From] = append(g[p.From], NI(n)) + roots.SetAll() } - return Directed{g} -} - -// TransposeLabeled constructs the directed labeled graph corresponding -// to FromList f but with arcs in the opposite direction. That is, from -// roots toward leaves. -// -// The argument labels can be nil. In this case labels are generated matching -// the path indexes. This corresponds to the "to", or child node. -// -// If labels is non-nil, it must be the same length as f.Paths and is used -// to look up label numbers by the path index. -// -// The method relies only on the From member of f.Paths. Other members of -// the FromList are not used. -// -// See FromList.TransposeLabeledRoots for a version that also accumulates -// and returns information about the roots. -func (f FromList) TransposeLabeled(labels []LI) LabeledDirected { - g := make(LabeledAdjacencyList, len(f.Paths)) - for n, p := range f.Paths { - if p.From == -1 { + for i, e := range p { + if e.From == -1 { continue } - l := LI(n) - if labels != nil { - l = labels[n] + g[e.From] = append(g[e.From], NI(i)) + if roots != nil && roots.Bit(i) == 1 { + roots.SetBit(i, 0) + nRoots-- } - g[p.From] = append(g[p.From], Half{NI(n), l}) } - return LabeledDirected{g} + return Directed{g}, nRoots } -// TransposeLabeledRoots constructs the labeled directed graph corresponding +// TransposeLabeled constructs the labeled directed graph corresponding // to FromList f but with arcs in the opposite direction. That is, from // roots toward leaves. // -// TransposeLabeledRoots also returns a count of roots of the resulting forest -// and a bitmap of the roots. -// // The argument labels can be nil. In this case labels are generated matching // the path indexes. This corresponds to the "to", or child node. // // If labels is non-nil, it must be the same length as t.Paths and is used // to look up label numbers by the path index. // +// If non-nil argrument roots is passed, Transpose populates it as roots of +// the resulting forest and returns nRoots as a count of the roots. +// // The method relies only on the From member of f.Paths. 
Other members of // the FromList are not used. -// -// See FromList.TransposeLabeled for a simpler verstion that returns the -// forest only. -func (f FromList) TransposeLabeledRoots(labels []LI) (forest LabeledDirected, nRoots int, roots Bits) { +func (f FromList) TransposeLabeled(labels []LI, roots *bits.Bits) (forest LabeledDirected, nRoots int) { p := f.Paths - nRoots = len(p) - roots.SetAll(len(p)) g := make(LabeledAdjacencyList, len(p)) + if roots != nil { + nRoots = len(p) + if roots.Num != nRoots { + *roots = bits.New(nRoots) + } + roots.SetAll() + } for i, p := range f.Paths { if p.From == -1 { continue @@ -378,41 +411,88 @@ func (f FromList) TransposeLabeledRoots(labels []LI) (forest LabeledDirected, nR if labels != nil { l = labels[i] } - n := NI(i) - g[p.From] = append(g[p.From], Half{n, l}) - if roots.Bit(n) == 1 { - roots.SetBit(n, 0) + g[p.From] = append(g[p.From], Half{NI(i), l}) + if roots != nil && roots.Bit(i) == 1 { + roots.SetBit(i, 0) nRoots-- } } - return LabeledDirected{g}, nRoots, roots + return LabeledDirected{g}, nRoots } -// TransposeRoots constructs the directed graph corresponding to FromList f -// but with arcs in the opposite direction. That is, from roots toward leaves. +// Undirected constructs the undirected graph corresponding to FromList f. +// +// The resulting graph will be a tree or forest. // -// TransposeRoots also returns a count of roots of the resulting forest and -// a bitmap of the roots. +// If non-nil argrument roots is passed, Transpose populates it as roots of +// the resulting forest and returns nRoots as a count of the roots. // // The method relies only on the From member of f.Paths. Other members of // the FromList are not used. -// -// See FromList.Transpose for a simpler verstion that returns the forest only. -func (f FromList) TransposeRoots() (forest Directed, nRoots int, roots Bits) { +func (f FromList) Undirected(roots *bits.Bits) (forest Undirected, nRoots int) { p := f.Paths - nRoots = len(p) - roots.SetAll(len(p)) g := make(AdjacencyList, len(p)) + if roots != nil { + nRoots = len(p) + if roots.Num != nRoots { + *roots = bits.New(nRoots) + } + roots.SetAll() + } for i, e := range p { if e.From == -1 { continue } - n := NI(i) - g[e.From] = append(g[e.From], n) - if roots.Bit(n) == 1 { - roots.SetBit(n, 0) + g[i] = append(g[i], e.From) + g[e.From] = append(g[e.From], NI(i)) + if roots != nil && roots.Bit(i) == 1 { + roots.SetBit(i, 0) + nRoots-- + } + } + return Undirected{g}, nRoots +} + +// LabeledUndirected constructs the labeled undirected graph corresponding +// to FromList f. +// +// The resulting graph will be a tree or forest. +// +// The argument labels can be nil. In this case labels are generated matching +// the path indexes. This corresponds to the "to", or child node. +// +// If labels is non-nil, it must be the same length as t.Paths and is used +// to look up label numbers by the path index. +// +// If non-nil argrument roots is passed, LabeledUndirected populates it as +// roots of the resulting forest and returns nRoots as a count of the roots. +// +// The method relies only on the From member of f.Paths. Other members of +// the FromList are not used. 
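The reworked Transpose above now takes an optional *bits.Bits in which to report roots. A hedged usage sketch, outside the vendored change and reusing the same small hand-built FromList as in the earlier sketch:

```go
package main

import (
	"fmt"

	"github.com/soniakeys/bits"
	"github.com/soniakeys/graph"
)

func main() {
	// Same small tree: node 0 is the root.
	f := graph.NewFromList(4)
	f.Paths[0] = graph.PathEnd{From: -1, Len: 1}
	f.Paths[1] = graph.PathEnd{From: 0, Len: 2}
	f.Paths[2] = graph.PathEnd{From: 0, Len: 2}
	f.Paths[3] = graph.PathEnd{From: 2, Len: 3}

	// Passing a *bits.Bits asks Transpose to also report roots of the forest.
	var roots bits.Bits
	forest, nRoots := f.Transpose(&roots)
	fmt.Println(nRoots, "root(s)")    // 1 root(s)
	fmt.Println(forest.AdjacencyList) // arcs now point from roots toward leaves
}
```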
+func (f FromList) LabeledUndirected(labels []LI, roots *bits.Bits) (forest LabeledUndirected, nRoots int) { + p := f.Paths + g := make(LabeledAdjacencyList, len(p)) + if roots != nil { + nRoots = len(p) + if roots.Num != nRoots { + *roots = bits.New(nRoots) + } + roots.SetAll() + } + for i, p := range f.Paths { + if p.From == -1 { + continue + } + l := LI(i) + if labels != nil { + l = labels[i] + } + g[i] = append(g[i], Half{p.From, l}) + g[p.From] = append(g[p.From], Half{NI(i), l}) + if roots != nil && roots.Bit(i) == 1 { + roots.SetBit(i, 0) nRoots-- } } - return Directed{g}, nRoots, roots + return LabeledUndirected{g}, nRoots } diff --git a/vendor/github.com/soniakeys/graph/go.mod b/vendor/github.com/soniakeys/graph/go.mod new file mode 100644 index 00000000..67bf8e02 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/go.mod @@ -0,0 +1,3 @@ +module "github.com/soniakeys/graph" + +require "github.com/soniakeys/bits" v1.0.0 diff --git a/vendor/github.com/soniakeys/graph/graph.go b/vendor/github.com/soniakeys/graph/graph.go index a2044e9a..734a4274 100644 --- a/vendor/github.com/soniakeys/graph/graph.go +++ b/vendor/github.com/soniakeys/graph/graph.go @@ -3,40 +3,70 @@ package graph +import ( + "bytes" + "errors" + "fmt" + "math" + "reflect" + "text/template" + + "github.com/soniakeys/bits" +) + // graph.go contains type definitions for all graph types and components. // Also, go generate directives for source transformations. // // For readability, the types are defined in a dependency order: // // NI -// NodeList // AdjacencyList // Directed // Undirected +// Bipartite +// Subgraph +// DirectedSubgraph +// UndirectedSubgraph // LI // Half +// fromHalf // LabeledAdjacencyList // LabeledDirected // LabeledUndirected +// LabeledBipartite +// LabeledSubgraph +// LabeledDirectedSubgraph +// LabeledUndirectedSubgraph // Edge // LabeledEdge +// LabeledPath // WeightFunc // WeightedEdgeList +// TraverseOption //go:generate cp adj_cg.go adj_RO.go //go:generate gofmt -r "LabeledAdjacencyList -> AdjacencyList" -w adj_RO.go //go:generate gofmt -r "n.To -> n" -w adj_RO.go //go:generate gofmt -r "Half -> NI" -w adj_RO.go +//go:generate gofmt -r "LabeledSubgraph -> Subgraph" -w adj_RO.go //go:generate cp dir_cg.go dir_RO.go //go:generate gofmt -r "LabeledDirected -> Directed" -w dir_RO.go +//go:generate gofmt -r "LabeledDirectedSubgraph -> DirectedSubgraph" -w dir_RO.go //go:generate gofmt -r "LabeledAdjacencyList -> AdjacencyList" -w dir_RO.go +//go:generate gofmt -r "labEulerian -> eulerian" -w dir_RO.go +//go:generate gofmt -r "newLabEulerian -> newEulerian" -w dir_RO.go +//go:generate gofmt -r "Half{n, -1} -> n" -w dir_RO.go //go:generate gofmt -r "n.To -> n" -w dir_RO.go //go:generate gofmt -r "Half -> NI" -w dir_RO.go //go:generate cp undir_cg.go undir_RO.go //go:generate gofmt -r "LabeledUndirected -> Undirected" -w undir_RO.go +//go:generate gofmt -r "LabeledBipartite -> Bipartite" -w undir_RO.go +//go:generate gofmt -r "LabeledUndirectedSubgraph -> UndirectedSubgraph" -w undir_RO.go //go:generate gofmt -r "LabeledAdjacencyList -> AdjacencyList" -w undir_RO.go +//go:generate gofmt -r "newLabEulerian -> newEulerian" -w undir_RO.go +//go:generate gofmt -r "Half{n, -1} -> n" -w undir_RO.go //go:generate gofmt -r "n.To -> n" -w undir_RO.go //go:generate gofmt -r "Half -> NI" -w undir_RO.go @@ -47,12 +77,7 @@ package graph // a graph. type NI int32 -// NodeList satisfies sort.Interface. 
-type NodeList []NI - -func (l NodeList) Len() int { return len(l) } -func (l NodeList) Less(i, j int) bool { return l[i] < l[j] } -func (l NodeList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +var NIBits = reflect.TypeOf(NI(0)).Bits() // An AdjacencyList represents a graph as a list of neighbors for each node. // The "node ID" of a node is simply it's slice index in the AdjacencyList. @@ -83,6 +108,82 @@ type Undirected struct { AdjacencyList // embedded to include AdjacencyList methods } +// Bipartite represents a bipartite graph. +// +// In a bipartite graph, nodes are partitioned into two sets, or +// "colors," such that every edge in the graph goes from one set to the +// other. +// +// Member Color represents the partition with a bitmap of length the same +// as the number of nodes in the graph. For convenience N0 stores the number +// of zero bits in Color. +// +// To construct a Bipartite object, if you can easily or efficiently use +// available information to construct the Color member, then you should do +// this and construct a Bipartite object with a Go struct literal. +// +// If partition information is not readily available, see the constructor +// Undirected.Bipartite. +// +// Alternatively, in some cases where the graph may have multiple connected +// components, the lower level Undirected.BipartiteComponent can be used to +// control color assignment by component. +type Bipartite struct { + Undirected + Color bits.Bits + N0 int +} + +// Subgraph represents a subgraph mapped to a supergraph. +// +// The subgraph is the embedded AdjacencyList and so the Subgraph type inherits +// all methods of Adjacency list. +// +// The embedded subgraph mapped relative to a specific supergraph, member +// Super. A subgraph may have fewer nodes than its supergraph. +// Each node of the subgraph must map to a distinct node of the supergraph. +// +// The mapping giving the supergraph node for a given subgraph node is +// represented by member SuperNI, a slice parallel to the the subgraph. +// +// The mapping in the other direction, giving a subgraph NI for a given +// supergraph NI, is represented with map SubNI. +// +// Multiple Subgraphs can be created relative to a single supergraph. +// The Subgraph type represents a mapping to only a single supergraph however. +// +// See graph methods InduceList and InduceBits for construction of +// node-induced subgraphs. +// +// Alternatively an empty subgraph can be constructed with InduceList(nil). +// Arbitrary subgraphs can then be built up with methods AddNode and AddArc. +type Subgraph struct { + AdjacencyList // the subgraph + Super *AdjacencyList // the supergraph + SubNI map[NI]NI // subgraph NIs, indexed by supergraph NIs + SuperNI []NI // supergraph NIs indexed by subgraph NIs +} + +// DirectedSubgraph represents a subgraph mapped to a supergraph. +// +// See additional doc at Subgraph type. +type DirectedSubgraph struct { + Directed + Super *Directed + SubNI map[NI]NI + SuperNI []NI +} + +// UndirectedSubgraph represents a subgraph mapped to a supergraph. +// +// See additional doc at Subgraph type. +type UndirectedSubgraph struct { + Undirected + Super *Undirected + SubNI map[NI]NI + SuperNI []NI +} + // LI is a label integer, used for associating labels with arcs. type LI int32 @@ -95,6 +196,16 @@ type Half struct { Label LI // half-arc ID for application data, often a weight } +// fromHalf is a half arc, representing a labeled arc and the "neighbor" node +// that the arc originates from. 
+ +// This is used internally in a couple of places. It used to be exported but is +// not currently needed anywhere in the API. +type fromHalf struct { + From NI + Label LI +} + // A LabeledAdjacencyList represents a graph as a list of neighbors for each // node, connected by labeled arcs. // @@ -133,6 +244,62 @@ type LabeledUndirected struct { LabeledAdjacencyList // embedded to include LabeledAdjacencyList methods } +// LabeledBipartite represents a bipartite graph. +// +// In a bipartite graph, nodes are partitioned into two sets, or +// "colors," such that every edge in the graph goes from one set to the +// other. +// +// Member Color represents the partition with a bitmap of length the same +// as the number of nodes in the graph. For convenience N0 stores the number +// of zero bits in Color. +// +// To construct a LabeledBipartite object, if you can easily or efficiently use +// available information to construct the Color member, then you should do +// this and construct a LabeledBipartite object with a Go struct literal. +// +// If partition information is not readily available, see the constructor +// Undirected.LabeledBipartite. +// +// Alternatively, in some cases where the graph may have multiple connected +// components, the lower level LabeledUndirected.BipartiteComponent can be used +// to control color assignment by component. +type LabeledBipartite struct { + LabeledUndirected + Color bits.Bits + N0 int +} + +// LabeledSubgraph represents a subgraph mapped to a supergraph. +// +// See additional doc at Subgraph type. +type LabeledSubgraph struct { + LabeledAdjacencyList + Super *LabeledAdjacencyList + SubNI map[NI]NI + SuperNI []NI +} + +// LabeledDirectedSubgraph represents a subgraph mapped to a supergraph. +// +// See additional doc at Subgraph type. +type LabeledDirectedSubgraph struct { + LabeledDirected + Super *LabeledDirected + SubNI map[NI]NI + SuperNI []NI +} + +// LabeledUndirectedSubgraph represents a subgraph mapped to a supergraph. +// +// See additional doc at Subgraph type. +type LabeledUndirectedSubgraph struct { + LabeledUndirected + Super *LabeledUndirected + SubNI map[NI]NI + SuperNI []NI +} + // Edge is an undirected edge between nodes N1 and N2. type Edge struct{ N1, N2 NI } @@ -142,6 +309,21 @@ type LabeledEdge struct { LI } +// LabeledPath is a start node and a path of half arcs leading from start. +type LabeledPath struct { + Start NI + Path []Half +} + +// Distance returns total path distance given WeightFunc w. +func (p LabeledPath) Distance(w WeightFunc) float64 { + d := 0. + for _, h := range p.Path { + d += w(h.Label) + } + return d +} + // WeightFunc returns a weight for a given label. // // WeightFunc is a parameter type for various search functions. The intent @@ -167,15 +349,432 @@ type WeightedEdgeList struct { Edges []LabeledEdge } -// Len implements sort.Interface. -func (l WeightedEdgeList) Len() int { return len(l.Edges) } +// DistanceMatrix constructs a distance matrix corresponding to the weighted +// edges of l. +// +// An edge n1, n2 with WeightFunc return w is represented by both +// d[n1][n2] == w and d[n2][n1] == w. In case of parallel edges, the lowest +// weight is stored. The distance from any node to itself d[n][n] is 0, unless +// the node has a loop with a negative weight. If l has no edge between n1 and +// distinct n2, +Inf is stored for d[n1][n2] and d[n2][n1]. +// +// The returned DistanceMatrix is suitable for DistanceMatrix.FloydWarshall.
+func (l WeightedEdgeList) DistanceMatrix() (d DistanceMatrix) { + d = newDM(l.Order) + for _, e := range l.Edges { + n1 := e.Edge.N1 + n2 := e.Edge.N2 + wt := l.WeightFunc(e.LI) + // < to pick min of parallel arcs (also nicely ignores NaN) + if wt < d[n1][n2] { + d[n1][n2] = wt + d[n2][n1] = wt + } + } + return +} + +// A DistanceMatrix is a square matrix representing some distance between +// nodes of a graph. If the graph is directected, d[from][to] represents +// some distance from node 'from' to node 'to'. Depending on context, the +// distance may be an arc weight or path distance. A value of +Inf typically +// means no arc or no path between the nodes. +type DistanceMatrix [][]float64 + +// little helper function, makes a blank distance matrix for FloydWarshall. +// could be exported? +func newDM(n int) DistanceMatrix { + inf := math.Inf(1) + d := make(DistanceMatrix, n) + for i := range d { + di := make([]float64, n) + for j := range di { + di[j] = inf + } + di[i] = 0 + d[i] = di + } + return d +} + +// FloydWarshall finds all pairs shortest distances for a weighted graph +// without negative cycles. +// +// It operates on a distance matrix representing arcs of a graph and +// destructively replaces arc weights with shortest path distances. +// +// In receiver d, d[fr][to] will be the shortest distance from node +// 'fr' to node 'to'. An element value of +Inf means no path exists. +// Any diagonal element < 0 indicates a negative cycle exists. +// +// See DistanceMatrix constructor methods of LabeledAdjacencyList and +// WeightedEdgeList for suitable inputs. +func (d DistanceMatrix) FloydWarshall() { + for k, dk := range d { + for _, di := range d { + dik := di[k] + for j := range d { + if d2 := dik + dk[j]; d2 < di[j] { + di[j] = d2 + } + } + } + } +} + +// PathMatrix is a return type for FloydWarshallPaths. +// +// It encodes all pairs shortest paths. +type PathMatrix [][]NI + +// Path returns a shortest path from node start to end. +// +// Argument p is truncated, appended to, and returned as the result. +// Thus the underlying allocation is reused if possible. +// If there is no path from start to end, p is returned truncated to +// zero length. +// +// If receiver m is not a valid populated PathMatrix as returned by +// FloydWarshallPaths, behavior is undefined and a panic is likely. +func (m PathMatrix) Path(start, end NI, p []NI) []NI { + p = p[:0] + for { + p = append(p, start) + if start == end { + return p + } + start = m[start][end] + if start < 0 { + return p[:0] + } + } +} + +// FloydWarshallPaths finds all pairs shortest paths for a weighted graph +// without negative cycles. +// +// It operates on a distance matrix representing arcs of a graph and +// destructively replaces arc weights with shortest path distances. +// +// In receiver d, d[fr][to] will be the shortest distance from node +// 'fr' to node 'to'. An element value of +Inf means no path exists. +// Any diagonal element < 0 indicates a negative cycle exists. +// +// The return value encodes the paths. See PathMatrix.Path. +// +// See DistanceMatrix constructor methods of LabeledAdjacencyList and +// WeightedEdgeList for suitable inputs. +// +// See also similar method FloydWarshallFromLists which has a richer +// return value. 
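For readers new to the FloydWarshall method documented above, a small illustrative sketch (not part of the diff) of running it on a hand-written DistanceMatrix; the weights are arbitrary example values.

```go
package main

import (
	"fmt"
	"math"

	"github.com/soniakeys/graph"
)

func main() {
	inf := math.Inf(1)
	// Arc weights for a 3-node directed graph: 0->1 weight 1, 1->2 weight 2,
	// and no direct arc from 0 to 2.
	d := graph.DistanceMatrix{
		{0, 1, inf},
		{inf, 0, 2},
		{inf, inf, 0},
	}
	d.FloydWarshall()    // destructively replaces arc weights with shortest path distances
	fmt.Println(d[0][2]) // 3, routed through node 1
}
```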
+func (d DistanceMatrix) FloydWarshallPaths() PathMatrix { + m := make(PathMatrix, len(d)) + inf := math.Inf(1) + for i, di := range d { + mi := make([]NI, len(d)) + for j, dij := range di { + if dij == inf { + mi[j] = -1 + } else { + mi[j] = NI(j) + } + } + m[i] = mi + } + for k, dk := range d { + for i, di := range d { + mi := m[i] + dik := di[k] + for j := range d { + if d2 := dik + dk[j]; d2 < di[j] { + di[j] = d2 + mi[j] = mi[k] + } + } + } + } + return m +} + +// FloydWarshallFromLists finds all pairs shortest paths for a weighted +// graph without negative cycles. +// +// It operates on a distance matrix representing arcs of a graph and +// destructively replaces arc weights with shortest path distances. +// +// In receiver d, d[fr][to] will be the shortest distance from node +// 'fr' to node 'to'. An element value of +Inf means no path exists. +// Any diagonal element < 0 indicates a negative cycle exists. +// +// The return value encodes the paths. The FromLists are fully populated +// with Leaves and Len values. See for example FromList.PathTo for +// extracting paths. Note though that for i'th FromList of the return +// value, PathTo(j) will return the path from j's root, which will not +// be i in the case that there is no path from i to j. You must check +// the first node of the path to see if it is i. If not, there is no +// path from i to j. See example. +// +// See DistanceMatrix constructor methods of LabeledAdjacencyList and +// WeightedEdgeList for suitable inputs. +// +// See also similar method FloydWarshallPaths, which has a lighter +// weight return value. +func (d DistanceMatrix) FloydWarshallFromLists() []FromList { + l := make([]FromList, len(d)) + inf := math.Inf(1) + for i, di := range d { + li := NewFromList(len(d)) + p := li.Paths + for j, dij := range di { + if i == j || dij == inf { + p[j] = PathEnd{From: -1} + } else { + p[j] = PathEnd{From: NI(i)} + } + } + l[i] = li + } + for k, dk := range d { + pk := l[k].Paths + for i, di := range d { + dik := di[k] + pi := l[i].Paths + for j := range d { + if d2 := dik + dk[j]; d2 < di[j] { + di[j] = d2 + pi[j] = pk[j] + } + } + } + } + for _, li := range l { + li.RecalcLeaves() + li.RecalcLen() + } + return l +} -// Less implements sort.Interface. -func (l WeightedEdgeList) Less(i, j int) bool { - return l.WeightFunc(l.Edges[i].LI) < l.WeightFunc(l.Edges[j].LI) +// AddEdge adds an edge to a subgraph. +// +// For argument e, e.N1 and e.N2 must be NIs in supergraph s.Super. As with +// AddNode, AddEdge panics if e.N1 and e.N2 are not valid node indexes of +// s.Super. +// +// Edge e must exist in s.Super. Further, the number of +// parallel edges in the subgraph cannot exceed the number of corresponding +// parallel edges in the supergraph. That is, each edge already added to the +// subgraph counts against the edges available in the supergraph. If a matching +// edge is not available, AddEdge returns an error. +// +// If a matching edge is available, subgraph nodes are added as needed, the +// subgraph edge is added, and the method returns nil. +func (s *UndirectedSubgraph) AddEdge(n1, n2 NI) error { + // verify supergraph NIs first, but without adding subgraph nodes just yet. 
+ if int(n1) < 0 || int(n1) >= s.Super.Order() { + panic(fmt.Sprint("AddEdge: NI ", n1, " not in supergraph")) + } + if int(n2) < 0 || int(n2) >= s.Super.Order() { + panic(fmt.Sprint("AddEdge: NI ", n2, " not in supergraph")) + } + // count existing matching edges in subgraph + n := 0 + a := s.Undirected.AdjacencyList + if b1, ok := s.SubNI[n1]; ok { + if b2, ok := s.SubNI[n2]; ok { + // both NIs already exist in subgraph, need to count edges + for _, t := range a[b1] { + if t == b2 { + n++ + } + } + if b1 != b2 { + // verify reciprocal arcs exist + r := 0 + for _, t := range a[b2] { + if t == b1 { + r++ + } + } + if r < n { + n = r + } + } + } + } + // verify matching edges are available in supergraph + m := 0 + for _, t := range (*s.Super).AdjacencyList[n1] { + if t == n2 { + if m == n { + goto r // arc match after all existing arcs matched + } + m++ + } + } + return errors.New("edge not available in supergraph") +r: + if n1 != n2 { + // verify reciprocal arcs + m = 0 + for _, t := range (*s.Super).AdjacencyList[n2] { + if t == n1 { + if m == n { + goto good + } + m++ + } + } + return errors.New("edge not available in supergraph") + } +good: + // matched enough edges. nodes can finally + // be added as needed and then the edge can be added. + b1 := s.AddNode(n1) + b2 := s.AddNode(n2) + s.Undirected.AddEdge(b1, b2) + return nil // success +} + +// AddEdge adds an edge to a subgraph. +// +// For argument e, e.N1 and e.N2 must be NIs in supergraph s.Super. As with +// AddNode, AddEdge panics if e.N1 and e.N2 are not valid node indexes of +// s.Super. +// +// Edge e must exist in s.Super with label l. Further, the number of +// parallel edges in the subgraph cannot exceed the number of corresponding +// parallel edges in the supergraph. That is, each edge already added to the +// subgraph counts against the edges available in the supergraph. If a matching +// edge is not available, AddEdge returns an error. +// +// If a matching edge is available, subgraph nodes are added as needed, the +// subgraph edge is added, and the method returns nil. +func (s *LabeledUndirectedSubgraph) AddEdge(e Edge, l LI) error { + // verify supergraph NIs first, but without adding subgraph nodes just yet. + if int(e.N1) < 0 || int(e.N1) >= s.Super.Order() { + panic(fmt.Sprint("AddEdge: NI ", e.N1, " not in supergraph")) + } + if int(e.N2) < 0 || int(e.N2) >= s.Super.Order() { + panic(fmt.Sprint("AddEdge: NI ", e.N2, " not in supergraph")) + } + // count existing matching edges in subgraph + n := 0 + a := s.LabeledUndirected.LabeledAdjacencyList + if b1, ok := s.SubNI[e.N1]; ok { + if b2, ok := s.SubNI[e.N2]; ok { + // both NIs already exist in subgraph, need to count edges + h := Half{b2, l} + for _, t := range a[b1] { + if t == h { + n++ + } + } + if b1 != b2 { + // verify reciprocal arcs exist + r := 0 + h.To = b1 + for _, t := range a[b2] { + if t == h { + r++ + } + } + if r < n { + n = r + } + } + } + } + // verify matching edges are available in supergraph + m := 0 + h := Half{e.N2, l} + for _, t := range (*s.Super).LabeledAdjacencyList[e.N1] { + if t == h { + if m == n { + goto r // arc match after all existing arcs matched + } + m++ + } + } + return errors.New("edge not available in supergraph") +r: + if e.N1 != e.N2 { + // verify reciprocal arcs + m = 0 + h.To = e.N1 + for _, t := range (*s.Super).LabeledAdjacencyList[e.N2] { + if t == h { + if m == n { + goto good + } + m++ + } + } + return errors.New("edge not available in supergraph") + } +good: + // matched enough edges. 
nodes can finally + // be added as needed and then the edge can be added. + n1 := s.AddNode(e.N1) + n2 := s.AddNode(e.N2) + s.LabeledUndirected.AddEdge(Edge{n1, n2}, l) + return nil // success } -// Swap implements sort.Interface. -func (l WeightedEdgeList) Swap(i, j int) { - l.Edges[i], l.Edges[j] = l.Edges[j], l.Edges[i] +// utility function called from all of the InduceList methods. +func mapList(l []NI) (sub map[NI]NI, sup []NI) { + sub = map[NI]NI{} + // one pass to collect unique NIs + for _, p := range l { + sub[NI(p)] = -1 + } + if len(sub) == len(l) { // NIs in l are unique + sup = append([]NI{}, l...) // just copy them + for b, p := range l { + sub[p] = NI(b) // and fill in map + } + } else { // NIs in l not unique + sup = make([]NI, 0, len(sub)) + for _, p := range l { // preserve ordering of first occurrences in l + if sub[p] < 0 { + sub[p] = NI(len(sup)) + sup = append(sup, p) + } + } + } + return +} + +// utility function called from all of the InduceBits methods. +func mapBits(t bits.Bits) (sub map[NI]NI, sup []NI) { + sup = make([]NI, 0, t.OnesCount()) + sub = make(map[NI]NI, cap(sup)) + t.IterateOnes(func(n int) bool { + sub[NI(n)] = NI(len(sup)) + sup = append(sup, NI(n)) + return true + }) + return +} + +// OrderMap formats maps for testable examples. +// +// OrderMap provides simple, no-frills formatting of maps in sorted order, +// convenient in some cases for output of testable examples. +func OrderMap(m interface{}) string { + // in particular exclude slices, which template would happily accept but + // which would probably represent a coding mistake + if reflect.TypeOf(m).Kind() != reflect.Map { + panic("not a map") + } + t := template.Must(template.New("").Parse( + `map[{{range $k, $v := .}}{{$k}}:{{$v}} {{end}}]`)) + var b bytes.Buffer + if err := t.Execute(&b, m); err != nil { + panic(err) + } + if bytes.HasSuffix(b.Bytes(), []byte(" ]")) { + b.Truncate(b.Len() - 2) + b.WriteByte(']') + } + return b.String() } diff --git a/vendor/github.com/soniakeys/graph/hacking.adoc b/vendor/github.com/soniakeys/graph/hacking.adoc new file mode 100644 index 00000000..876d33ff --- /dev/null +++ b/vendor/github.com/soniakeys/graph/hacking.adoc @@ -0,0 +1,135 @@ += Hacking + +== Get, install +Basic use of the package is just go get, or git clone; go install. There are +no dependencies outside the standard library. + +== Build +CI is currently on travis-ci.org. + +The build runs go vet with a few exceptions for things I'm not a big fan of. + +https://github.com/client9/misspell has been valuable. + +Also I wrote https://github.com/soniakeys/vetc to validate that each source +file has copyright/license statement. + +Then, it’s not in the ci script, but I wrote https://github.com/soniakeys/rcv +to put coverage stats in the readme. Maybe it could be commit hook or +something but for now I’ll try just running it manually now and then. + +Go fmt is not in the ci script, but I have at least one editor set up to run +it on save, so code should stay formatted pretty well. + +== Examples with random output +The math/rand generators with constant seeds used to give consistent numbers +across Go versions and so some examples relied on this. Sometime after Go 1.9 +though the numbers changed. The technique for now is to go ahead and write +those examples, get them working, then change the `// Output:` line to +`// Random output:`. This keeps them showing in go doc but keeps them from +being run by go test. This works for now. It might be revisited at some +point. 
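The "Random output:" convention described above might look roughly like the sketch below in an example _test file. It is illustrative only and borrows GnpUndirected from the random.go changes later in this diff; because the comment is not a directive the testing package recognizes, the example still compiles and appears in go doc but go test does not run and compare it.

```go
package graph_test

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func ExampleGnpUndirected() {
	// nil means the rand package default shared source is used.
	g, m := graph.GnpUndirected(4, .5, nil)
	fmt.Println(g.Order(), "nodes,", m, "edges")
	// Random output:
	// 4 nodes, 3 edges
}
```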
+ +== Plans +The primary to-do list is the issue tracker on Github. + +== Direction, focus, features +The project started with no real goal or purpose, just as a place for some code +that might be useful. Here are some elements that characterize the direction. + +* The focus has been on algorithms on adjacency lists. That is, adjacency list + is the fundamental representation for most implemented algorithms. There are + many other interesting representations, many reasons to use them, but + adjacency list is common in literature and practice. It has been useful to + focus on this data representation, at first anyway. + +* The focus has been on single-threaded algorithms. Again, there is much new + and interesting work being done with concurrent, parallel, and distributed + graph algorithms, and Go might be an excellent language to implement some of + these algorithms. But as a preliminary step, more traditional + single-threaded algorithms are implemented. + +* The focus has been on static finite graphs. Again there is much interesting + work in online algorithms, dynamic graphs, and infinite graphs, but these + are not generally considered here. + +* Algorithms selected for implementation are generally ones commonly appearing + in beginning graph theory discussions and in general purpose graph libraries + in other programming languages. With these as drivers, there's a big risk of + developing a library of curiosities and academic exercises rather than a + library of practical utility. But well, it's a start. The hope is that + there are some practical drivers behind graph theory and behind other graph + libraries. + +* There is active current research going on in graph algorithm development. + One goal for this library is to implement newer and faster algorithms. + In some cases where it seems not too much work, older/classic/traditional + algorithms may be implemented for comparison. These generally go in the + alt subdirectory. + +== General principles +* The API is rather low level. + +* Slices instead of maps. Maps are pretty efficient, and the property of + unique keys can be useful, but slices are still faster and more efficient, + and the unique key property is not always needed or wanted. The Adjacency + list implementation of this library is all done in slices. Slices are used + in algorithms where possible, in preference to maps. Maps are still used in + some cases where uniqueness is needed. + +* Interfaces not generally used. Algorithms are implemented directly on + concrete data types and not on interfaces describing the capabilities of + the data types. The abstraction of interfaces is a nice match to graph + theory and the convenience of running graph algorithms on any type that + implements an interface is appealing, but the costs seem too high to me. + Slices are rich with capabilities that get hidden behind interfaces and + direct slice manipulation is always faster than going through interfaces. + An impedance for programs using the library is that they will generally + have to implement a mapping from slice indexes to their application data, + often including, for example, some other form of node ID. It seems fair + to push this burden outside the graph library; the library cannot know + the needs of this mapping. + +* Bitsets are widely used, particularly to store one bit of information per + node of a graph. I used math/big at first but then moved to a dense bitset + of my own. Yes, I considered other third-party bitsets but had my own + feature set I wanted.
A slice of bools is another alternative. Bools will + be faster in almost all cases but the bitset will use less memory. I'm + choosing size over speed for now. + +* Code generation is used to provide methods that work on both labeled and + unlabeled graphs. Code is written to labeled types, then transformations + generate the unlabeled equivalents. + +* Methods are named for what they return rather than what they do, where + reasonable anyway. + +* Consistency in method signature and behavior across corresponding methods, + for example directed/undirected, labeled/unlabeled, again, as long as it's + reasonable. + +* Sometimes in tension with the consistency principle, methods are lazy about + datatypes of parameters and return values. Sometimes a value might have + different reasonable representations; a set might be a bitset, map, slice + of bools, or slice of set members for example. Methods will take and return + whatever is convenient for them and not convert the form just for consistency + or to try to guess what a caller would prefer. + +* Methods return multiple results for whatever the algorithm produces that + might be of interest. Sometimes an algorithm will have a primary result but + then some secondary values that also might be of interest. If they are + already computed as a byproduct of the algorithm, or can be computed at + negligible cost, return them. + +* Sometimes in conflict with the multiple result principle, methods will not + speculatively compute secondary results if there is any significant cost + and if the secondary result can be just as easily computed later. + +== Code Maintenance +There are tons of cut and paste variants. There's the basic AdjacencyList, +then Directed and Undirected variants, then Labeled variants of each of those. +Code gen helps avoid some cut and paste but there's a bunch that doesn't +code gen very well and so is duplicated with cut and paste. In particular +the testable examples in the _test files don't code gen well and so are pretty much +all duplicated by hand. If you change code, think about where there should +be variants and go look to see if the variants need similar changes. diff --git a/vendor/github.com/soniakeys/graph/hacking.md b/vendor/github.com/soniakeys/graph/hacking.md deleted file mode 100644 index 30d2d7c5..00000000 --- a/vendor/github.com/soniakeys/graph/hacking.md +++ /dev/null @@ -1,37 +0,0 @@ -#Hacking - -Basic use of the package is just go get, or git clone; go install. There are -no dependencies outside the standard library. - -The primary to-do list is the issue tracker on Github. I maintained a -journal on google drive for a while but at some point filed issues for all -remaining ideas in that document that still seemed relevant. So currently -there is no other roadmap or planning document. - -CI is currently on travis-ci.org. The .travis.yml builds for go 1.2.1 -following https://github.com/soniakeys/graph/issues/49, and it currently builds -for go 1.6 as well. The travis script calls a shell script right away because -I didn’t see a way to get it to do different steps for the different go -versions. For 1.2.1, I just wanted the basic tests. For a current go version -such as 1.6, there’s a growing list of checks. - -The GOARCH=386 test is for https://github.com/soniakeys/graph/issues/41. -The problem is the architecture specific code in bits32.go and bits64.go. -Yes, there are architecture independent algorithms. There is also assembly -to access machine instructions. Anyway, it’s the way it is for now.
- -Im not big on making go vet happy just for a badge but I really like the -example check that I believe appeared with go 1.6. (I think it will be a -standard check with 1.7, so the test script will have to change then.) - -https://github.com/client9/misspell has been valuable. - -Also I wrote https://github.com/soniakeys/vetc to validate that each source -file has copyright/license statement. - -Then, it’s not in the ci script, but I wrote https://github.com/soniakeys/rcv -to put coverage stats in the readme. Maybe it could be commit hook or -something but for now I’ll try just running it manually now and then. - -Go fmt is not in the ci script, but I have at least one editor set up to run -it on save, so code should stay formatted pretty well. diff --git a/vendor/github.com/soniakeys/graph/mst.go b/vendor/github.com/soniakeys/graph/mst.go index 028e680c..92a9175c 100644 --- a/vendor/github.com/soniakeys/graph/mst.go +++ b/vendor/github.com/soniakeys/graph/mst.go @@ -6,6 +6,8 @@ package graph import ( "container/heap" "sort" + + "github.com/soniakeys/bits" ) type dsElement struct { @@ -82,35 +84,47 @@ func (ds disjointSet) find(n NI) NI { // Kruskal implements Kruskal's algorithm for constructing a minimum spanning // forest on an undirected graph. // -// While the input graph is interpreted as undirected, the receiver edge list -// does not actually need to contain reciprocal arcs. A property of the -// algorithm is that arc direction is ignored. Thus only a single arc out of -// a reciprocal pair must be present in the edge list. Reciprocal arcs (and -// parallel arcs) are allowed though, and do not affect the result. +// The forest is returned as an undirected graph. +// +// Also returned is a total distance for the returned forest. +// +// This method is a convenience wrapper for LabeledEdgeList.Kruskal. +// If you have no need for the input graph as a LabeledUndirected, it may be +// more efficient to construct a LabeledEdgeList directly. +func (g LabeledUndirected) Kruskal(w WeightFunc) (spanningForest LabeledUndirected, dist float64) { + return g.WeightedArcsAsEdges(w).Kruskal() +} + +// Kruskal implements Kruskal's algorithm for constructing a minimum spanning +// forest on an undirected graph. +// +// The algorithm allows parallel edges, thus it is acceptable to construct +// the receiver with LabeledUndirected.WeightedArcsAsEdges. It may be more +// efficient though, if you can construct the receiver WeightedEdgeList +// directly without parallel edges. // // The forest is returned as an undirected graph. // // Also returned is a total distance for the returned forest. // -// The edge list of the receiver is sorted as a side effect of this method. -// See KruskalSorted for a version that relies on the edge list being already -// sorted. +// The edge list of the receiver is sorted in place as a side effect of this +// method. See KruskalSorted for a version that relies on the edge list being +// already sorted. This method is a wrapper for KruskalSorted. If you can +// generate the input graph sorted as required for KruskalSorted, you can +// call that method directly and avoid the overhead of the sort. func (l WeightedEdgeList) Kruskal() (g LabeledUndirected, dist float64) { - sort.Sort(l) + e := l.Edges + w := l.WeightFunc + sort.Slice(e, func(i, j int) bool { return w(e[i].LI) < w(e[j].LI) }) return l.KruskalSorted() } // KruskalSorted implements Kruskal's algorithm for constructing a minimum // spanning tree on an undirected graph. 
// -// While the input graph is interpreted as undirected, the receiver edge list -// does not actually need to contain reciprocal arcs. A property of the -// algorithm is that arc direction is ignored. Thus only a single arc out of -// a reciprocal pair must be present in the edge list. Reciprocal arcs (and -// parallel arcs) are allowed though, and do not affect the result. -// // When called, the edge list of the receiver must be already sorted by weight. -// See Kruskal for a version that accepts an unsorted edge list. +// See the Kruskal method for a version that accepts an unsorted edge list. +// As with Kruskal, parallel edges are allowed. // // The forest is returned as an undirected graph. // @@ -148,11 +162,14 @@ func (l WeightedEdgeList) KruskalSorted() (g LabeledUndirected, dist float64) { // Returned are the number of nodes spanned for the single tree (which will be // the order of the connected component) and the total spanned distance for the // single tree. -func (g LabeledUndirected) Prim(start NI, w WeightFunc, f *FromList, labels []LI, componentLeaves *Bits) (numSpanned int, dist float64) { +func (g LabeledUndirected) Prim(start NI, w WeightFunc, f *FromList, labels []LI, componentLeaves *bits.Bits) (numSpanned int, dist float64) { al := g.LabeledAdjacencyList if len(f.Paths) != len(al) { *f = NewFromList(len(al)) } + if f.Leaves.Num != len(al) { + f.Leaves = bits.New(len(al)) + } b := make([]prNode, len(al)) // "best" for n := range b { b[n].nx = NI(n) @@ -163,9 +180,12 @@ func (g LabeledUndirected) Prim(start NI, w WeightFunc, f *FromList, labels []LI rp[start] = PathEnd{From: -1, Len: 1} numSpanned = 1 fLeaves := &f.Leaves - fLeaves.SetBit(start, 1) + fLeaves.SetBit(int(start), 1) if componentLeaves != nil { - componentLeaves.SetBit(start, 1) + if componentLeaves.Num != len(al) { + *componentLeaves = bits.New(len(al)) + } + componentLeaves.SetBit(int(start), 1) } for a := start; ; { for _, nb := range al[a] { @@ -194,27 +214,17 @@ func (g LabeledUndirected) Prim(start NI, w WeightFunc, f *FromList, labels []LI labels[a] = bp.from.Label } dist += bp.wt - fLeaves.SetBit(bp.from.From, 0) - fLeaves.SetBit(a, 1) + fLeaves.SetBit(int(bp.from.From), 0) + fLeaves.SetBit(int(a), 1) if componentLeaves != nil { - componentLeaves.SetBit(bp.from.From, 0) - componentLeaves.SetBit(a, 1) + componentLeaves.SetBit(int(bp.from.From), 0) + componentLeaves.SetBit(int(a), 1) } numSpanned++ } return } -// fromHalf is a half arc, representing a labeled arc and the "neighbor" node -// that the arc originates from. -// -// (This used to be exported when there was a LabeledFromList. Currently -// unexported now that it seems to have much more limited use.) -type fromHalf struct { - From NI - Label LI -} - type prNode struct { nx NI from fromHalf diff --git a/vendor/github.com/soniakeys/graph/random.go b/vendor/github.com/soniakeys/graph/random.go index 99f04458..e4bbe30c 100644 --- a/vendor/github.com/soniakeys/graph/random.go +++ b/vendor/github.com/soniakeys/graph/random.go @@ -7,9 +7,67 @@ import ( "errors" "math" "math/rand" - "time" + + "github.com/soniakeys/bits" ) +// ChungLu constructs a random simple undirected graph. +// +// The Chung Lu model is similar to a "configuration model" where each +// node has a specified degree. In the Chung Lu model the degree specified +// for each node is taken as an expected degree, not an exact degree. +// +// Argument w is "weight," the expected degree for each node. +// The values of w must be given in decreasing order. 
+// +// The constructed graph will have node 0 with expected degree w[0] and so on +// so degree will decrease with node number. To randomize degree across +// node numbers, consider using the Permute method with a rand.Perm. +// +// Also returned is the actual size m of constructed graph g. +// +// If Rand r is nil, the rand package default shared source is used. +func ChungLu(w []float64, rr *rand.Rand) (g Undirected, m int) { + // Ref: "Efficient Generation of Networks with Given Expected Degrees" + // Joel C. Miller and Aric Hagberg + // accessed at http://aric.hagberg.org/papers/miller-2011-efficient.pdf + rf := rand.Float64 + if rr != nil { + rf = rr.Float64 + } + a := make(AdjacencyList, len(w)) + S := 0. + for i := len(w) - 1; i >= 0; i-- { + S += w[i] + } + for u := 0; u < len(w)-1; u++ { + v := u + 1 + p := w[u] * w[v] / S + if p > 1 { + p = 1 + } + for v < len(w) && p > 0 { + if p != 1 { + v += int(math.Log(rf()) / math.Log(1-p)) + } + if v < len(w) { + q := w[u] * w[v] / S + if q > 1 { + q = 1 + } + if rf() < q/p { + a[u] = append(a[u], NI(v)) + a[v] = append(a[v], NI(u)) + m++ + } + p = q + v++ + } + } + } + return Undirected{a}, m +} + // Euclidean generates a random simple graph on the Euclidean plane. // // Nodes are associated with coordinates uniformly distributed on a unit @@ -28,23 +86,24 @@ import ( // combinations of nNodes and nArcs cannot be achieved with any amount of // patience given that the returned graph must be simple. // -// If Rand r is nil, the method creates a new source and generator for -// one-time use. +// If Rand r is nil, the rand package default shared source is used. // // Returned is a directed simple graph and associated positions indexed by -// node number. +// node number. In the arc list for each node, to-nodes are in random +// order. // // See also LabeledEuclidean. -func Euclidean(nNodes, nArcs int, affinity float64, patience int, r *rand.Rand) (g Directed, pos []struct{ X, Y float64 }, err error) { +func Euclidean(nNodes, nArcs int, affinity float64, patience int, rr *rand.Rand) (g Directed, pos []struct{ X, Y float64 }, err error) { a := make(AdjacencyList, nNodes) // graph - // generate random positions - if r == nil { - r = rand.New(rand.NewSource(time.Now().UnixNano())) + ri, rf, re := rand.Intn, rand.Float64, rand.ExpFloat64 + if rr != nil { + ri, rf, re = rr.Intn, rr.Float64, rr.ExpFloat64 } + // generate random positions pos = make([]struct{ X, Y float64 }, nNodes) for i := range pos { - pos[i].X = r.Float64() - pos[i].Y = r.Float64() + pos[i].X = rf() + pos[i].Y = rf() } // arcs var tooFar, dup int @@ -58,10 +117,10 @@ arc: err = errors.New("overcrowding") return } - n1 := NI(r.Intn(nNodes)) + n1 := NI(ri(nNodes)) var n2 NI for { - n2 = NI(r.Intn(nNodes)) + n2 = NI(ri(nNodes)) if n2 != n1 { // no graph loops break } @@ -69,7 +128,7 @@ arc: c1 := &pos[n1] c2 := &pos[n2] dist := math.Hypot(c2.X-c1.X, c2.Y-c1.Y) - if dist*affinity > r.ExpFloat64() { // favor near nodes + if dist*affinity > re() { // favor near nodes tooFar++ continue } @@ -93,17 +152,18 @@ arc: // // Otherwise the function arguments and return values are the same as for // function Euclidean. See Euclidean. 
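Since the Euclidean generator rewritten in this hunk now accepts a nil *rand.Rand and falls back to the rand package default shared source, here is a small hedged usage sketch, outside the vendored change, with arbitrary parameter values.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// 10 nodes, 20 arcs, moderate affinity, patience of 100 for rejected arcs.
	// Passing nil uses the rand package default shared source.
	g, pos, err := graph.Euclidean(10, 20, 2, 100, nil)
	if err != nil {
		fmt.Println(err) // e.g. "overcrowding" if patience is exhausted
		return
	}
	fmt.Println(g.Order(), "nodes,", len(pos), "positions")
}
```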
-func LabeledEuclidean(nNodes, nArcs int, affinity float64, patience int, r *rand.Rand) (g LabeledDirected, pos []struct{ X, Y float64 }, wt []float64, err error) { +func LabeledEuclidean(nNodes, nArcs int, affinity float64, patience int, rr *rand.Rand) (g LabeledDirected, pos []struct{ X, Y float64 }, wt []float64, err error) { a := make(LabeledAdjacencyList, nNodes) // graph wt = make([]float64, nArcs) // arc weights - // generate random positions - if r == nil { - r = rand.New(rand.NewSource(time.Now().UnixNano())) + ri, rf, re := rand.Intn, rand.Float64, rand.ExpFloat64 + if rr != nil { + ri, rf, re = rr.Intn, rr.Float64, rr.ExpFloat64 } + // generate random positions pos = make([]struct{ X, Y float64 }, nNodes) for i := range pos { - pos[i].X = r.Float64() - pos[i].Y = r.Float64() + pos[i].X = rf() + pos[i].Y = rf() } // arcs var tooFar, dup int @@ -117,10 +177,10 @@ arc: err = errors.New("overcrowding") return } - n1 := NI(r.Intn(nNodes)) + n1 := NI(ri(nNodes)) var n2 NI for { - n2 = NI(r.Intn(nNodes)) + n2 = NI(ri(nNodes)) if n2 != n1 { // no graph loops break } @@ -128,7 +188,7 @@ arc: c1 := &pos[n1] c2 := &pos[n2] dist := math.Hypot(c2.X-c1.X, c2.Y-c1.Y) - if dist*affinity > r.ExpFloat64() { // favor near nodes + if dist*affinity > re() { // favor near nodes tooFar++ continue } @@ -154,27 +214,36 @@ arc: // // The resulting number of edges is somewhat random but asymptotically // approaches m = πr²n²/2. The method accumulates and returns the actual -// number of edges constructed. +// number of edges constructed. In the arc list for each node, to-nodes are +// ordered. Consider using ShuffleArcLists if random order is important. // -// If Rand r is nil, the method creates a new source and generator for -// one-time use. +// If Rand r is nil, the rand package default shared source is used. // // See also LabeledGeometric. -func Geometric(nNodes int, radius float64, r *rand.Rand) (g Undirected, pos []struct{ X, Y float64 }, m int) { +func Geometric(nNodes int, radius float64, rr *rand.Rand) (g Undirected, pos []struct{ X, Y float64 }, m int) { // Expected degree is approximately nπr². a := make(AdjacencyList, nNodes) - if r == nil { - r = rand.New(rand.NewSource(time.Now().UnixNano())) + rf := rand.Float64 + if rr != nil { + rf = rr.Float64 } pos = make([]struct{ X, Y float64 }, nNodes) for i := range pos { - pos[i].X = r.Float64() - pos[i].Y = r.Float64() + pos[i].X = rf() + pos[i].Y = rf() } for u, up := range pos { for v := u + 1; v < len(pos); v++ { vp := pos[v] - if math.Hypot(up.X-vp.X, up.Y-vp.Y) < radius { + dx := math.Abs(up.X - vp.X) + if dx >= radius { + continue + } + dy := math.Abs(up.Y - vp.Y) + if dy >= radius { + continue + } + if math.Hypot(dx, dy) < radius { a[u] = append(a[u], NI(v)) a[v] = append(a[v], NI(u)) m++ @@ -193,15 +262,16 @@ func Geometric(nNodes int, radius float64, r *rand.Rand) (g Undirected, pos []st // size m is len(wt). // // See Geometric for additional description. 
-func LabeledGeometric(nNodes int, radius float64, r *rand.Rand) (g LabeledUndirected, pos []struct{ X, Y float64 }, wt []float64) { +func LabeledGeometric(nNodes int, radius float64, rr *rand.Rand) (g LabeledUndirected, pos []struct{ X, Y float64 }, wt []float64) { a := make(LabeledAdjacencyList, nNodes) - if r == nil { - r = rand.New(rand.NewSource(time.Now().UnixNano())) + rf := rand.Float64 + if rr != nil { + rf = rr.Float64 } pos = make([]struct{ X, Y float64 }, nNodes) for i := range pos { - pos[i].X = r.Float64() - pos[i].Y = r.Float64() + pos[i].X = rf() + pos[i].Y = rf() } for u, up := range pos { for v := u + 1; v < len(pos); v++ { @@ -217,20 +287,332 @@ func LabeledGeometric(nNodes int, radius float64, r *rand.Rand) (g LabeledUndire return } +// GnmUndirected constructs a random simple undirected graph. +// +// Construction is by the Erdős–Rényi model where the specified number of +// distinct edges is selected from all possible edges with equal probability. +// +// Argument n is number of nodes, m is number of edges and must be <= n(n-1)/2. +// +// If Rand r is nil, the rand package default shared source is used. +// +// In the generated arc list for each node, to-nodes are ordered. +// Consider using ShuffleArcLists if random order is important. +// +// See also Gnm3Undirected, a method producing a statistically equivalent +// result, but by an algorithm with somewhat different performance properties. +// Performance of the two methods is expected to be similar in most cases but +// it may be worth trying both with your data to see if one has a clear +// advantage. +func GnmUndirected(n, m int, rr *rand.Rand) Undirected { + // based on Alg. 2 from "Efficient Generation of Large Random Networks", + // Vladimir Batagelj and Ulrik Brandes. + // accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf + ri := rand.Intn + if rr != nil { + ri = rr.Intn + } + re := n * (n - 1) / 2 + ml := m + if m*2 > re { + ml = re - m + } + e := map[int]struct{}{} + for len(e) < ml { + e[ri(re)] = struct{}{} + } + a := make(AdjacencyList, n) + if m*2 > re { + i := 0 + for v := 1; v < n; v++ { + for w := 0; w < v; w++ { + if _, ok := e[i]; !ok { + a[v] = append(a[v], NI(w)) + a[w] = append(a[w], NI(v)) + } + i++ + } + } + } else { + for i := range e { + v := 1 + int(math.Sqrt(.25+float64(2*i))-.5) + w := i - (v * (v - 1) / 2) + a[v] = append(a[v], NI(w)) + a[w] = append(a[w], NI(v)) + } + } + return Undirected{a} +} + +// GnmDirected constructs a random simple directed graph. +// +// Construction is by the Erdős–Rényi model where the specified number of +// distinct arcs is selected from all possible arcs with equal probability. +// +// Argument n is number of nodes, ma is number of arcs and must be <= n(n-1). +// +// If Rand r is nil, the rand package default shared source is used. +// +// In the generated arc list for each node, to-nodes are ordered. +// Consider using ShuffleArcLists if random order is important. +// +// See also Gnm3Directed, a method producing a statistically equivalent +// result, but by +// an algorithm with somewhat different performance properties. Performance +// of the two methods is expected to be similar in most cases but it may be +// worth trying both with your data to see if one has a clear advantage. +func GnmDirected(n, ma int, rr *rand.Rand) Directed { + // based on Alg. 2 from "Efficient Generation of Large Random Networks", + // Vladimir Batagelj and Ulrik Brandes. 
+ // accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf + ri := rand.Intn + if rr != nil { + ri = rr.Intn + } + re := n * (n - 1) + ml := ma + if ma*2 > re { + ml = re - ma + } + e := map[int]struct{}{} + for len(e) < ml { + e[ri(re)] = struct{}{} + } + a := make(AdjacencyList, n) + if ma*2 > re { + i := 0 + for v := 0; v < n; v++ { + for w := 0; w < n; w++ { + if w == v { + continue + } + if _, ok := e[i]; !ok { + a[v] = append(a[v], NI(w)) + } + i++ + } + } + } else { + for i := range e { + v := i / (n - 1) + w := i % (n - 1) + if w >= v { + w++ + } + a[v] = append(a[v], NI(w)) + } + } + return Directed{a} +} + +// Gnm3Undirected constructs a random simple undirected graph. +// +// Construction is by the Erdős–Rényi model where the specified number of +// distinct edges is selected from all possible edges with equal probability. +// +// Argument n is number of nodes, m is number of edges and must be <= n(n-1)/2. +// +// If Rand r is nil, the rand package default shared source is used. +// +// In the generated arc list for each node, to-nodes are ordered. +// Consider using ShuffleArcLists if random order is important. +// +// See also GnmUndirected, a method producing a statistically equivalent +// result, but by an algorithm with somewhat different performance properties. +// Performance of the two methods is expected to be similar in most cases but +// it may be worth trying both with your data to see if one has a clear +// advantage. +func Gnm3Undirected(n, m int, rr *rand.Rand) Undirected { + // based on Alg. 3 from "Efficient Generation of Large Random Networks", + // Vladimir Batagelj and Ulrik Brandes. + // accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf + // + // I like this algorithm for its elegance. Pitty it tends to run a + // a little slower than the retry algorithm of Gnm. + ri := rand.Intn + if rr != nil { + ri = rr.Intn + } + a := make(AdjacencyList, n) + re := n * (n - 1) / 2 + rm := map[int]int{} + for i := 0; i < m; i++ { + er := i + ri(re-i) + eNew := er + if rp, ok := rm[er]; ok { + eNew = rp + } + if rp, ok := rm[i]; !ok { + rm[er] = i + } else { + rm[er] = rp + } + v := 1 + int(math.Sqrt(.25+float64(2*eNew))-.5) + w := eNew - (v * (v - 1) / 2) + a[v] = append(a[v], NI(w)) + a[w] = append(a[w], NI(v)) + } + return Undirected{a} +} + +// Gnm3Directed constructs a random simple directed graph. +// +// Construction is by the Erdős–Rényi model where the specified number of +// distinct arcs is selected from all possible arcs with equal probability. +// +// Argument n is number of nodes, ma is number of arcs and must be <= n(n-1). +// +// If Rand r is nil, the rand package default shared source is used. +// +// In the generated arc list for each node, to-nodes are ordered. +// Consider using ShuffleArcLists if random order is important. +// +// See also GnmDirected, a method producing a statistically equivalent result, +// but by an algorithm with somewhat different performance properties. +// Performance of the two methods is expected to be similar in most cases +// but it may be worth trying both with your data to see if one has a clear +// advantage. +func Gnm3Directed(n, ma int, rr *rand.Rand) Directed { + // based on Alg. 3 from "Efficient Generation of Large Random Networks", + // Vladimir Batagelj and Ulrik Brandes. 
+ // accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf + ri := rand.Intn + if rr != nil { + ri = rr.Intn + } + a := make(AdjacencyList, n) + re := n * (n - 1) + rm := map[int]int{} + for i := 0; i < ma; i++ { + er := i + ri(re-i) + eNew := er + if rp, ok := rm[er]; ok { + eNew = rp + } + if rp, ok := rm[i]; !ok { + rm[er] = i + } else { + rm[er] = rp + } + v := eNew / (n - 1) + w := eNew % (n - 1) + if w >= v { + w++ + } + a[v] = append(a[v], NI(w)) + } + return Directed{a} +} + +// GnpUndirected constructs a random simple undirected graph. +// +// Construction is by the Gilbert model, an Erdős–Rényi like model where +// distinct edges are independently selected from all possible edges with +// the specified probability. +// +// Argument n is number of nodes, p is probability for selecting an edge. +// +// If Rand r is nil, the rand package default shared source is used. +// +// In the generated arc list for each node, to-nodes are ordered. +// Consider using ShuffleArcLists if random order is important. +// +// Also returned is the actual size m of constructed graph g. +func GnpUndirected(n int, p float64, rr *rand.Rand) (g Undirected, m int) { + a := make(AdjacencyList, n) + if n < 2 { + return Undirected{a}, 0 + } + rf := rand.Float64 + if rr != nil { + rf = rr.Float64 + } + // based on Alg. 1 from "Efficient Generation of Large Random Networks", + // Vladimir Batagelj and Ulrik Brandes. + // accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf + var v, w NI = 1, -1 +g: + for c := 1 / math.Log(1-p); ; { + w += 1 + NI(c*math.Log(1-rf())) + for { + if w < v { + a[v] = append(a[v], w) + a[w] = append(a[w], v) + m++ + continue g + } + w -= v + v++ + if v == NI(n) { + break g + } + } + } + return Undirected{a}, m +} + +// GnpDirected constructs a random simple directed graph. +// +// Construction is by the Gilbert model, an Erdős–Rényi like model where +// distinct arcs are independently selected from all possible arcs with +// the specified probability. +// +// Argument n is number of nodes, p is probability for selecting an arc. +// +// If Rand r is nil, the rand package default shared source is used. +// +// In the generated arc list for each node, to-nodes are ordered. +// Consider using ShuffleArcLists if random order is important. +// +// Also returned is the actual arc size m of constructed graph g. +func GnpDirected(n int, p float64, rr *rand.Rand) (g Directed, ma int) { + a := make(AdjacencyList, n) + if n < 2 { + return Directed{a}, 0 + } + rf := rand.Float64 + if rr != nil { + rf = rr.Float64 + } + // based on Alg. 1 from "Efficient Generation of Large Random Networks", + // Vladimir Batagelj and Ulrik Brandes. + // accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf + var v, w NI = 0, -1 +g: + for c := 1 / math.Log(1-p); ; { + w += 1 + NI(c*math.Log(1-rf())) + for ; ; w -= NI(n) { + if w == v { + w++ + } + if w < NI(n) { + a[v] = append(a[v], w) + ma++ + continue g + } + v++ + if v == NI(n) { + break g + } + } + } + return Directed{a}, ma +} + // KroneckerDirected generates a Kronecker-like random directed graph. // // The returned graph g is simple and has no isolated nodes but is not // necessarily fully connected. The number of of nodes will be <= 2^scale, // and will be near 2^scale for typical values of arcFactor, >= 2. // ArcFactor * 2^scale arcs are generated, although loops and duplicate arcs -// are rejected. +// are rejected. In the arc list for each node, to-nodes are in random +// order. 
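For comparison with the fixed-size G(n, m) generators, here is a sketch of the Gilbert G(n, p) variant, where each possible edge is included independently with probability p and the realized size m is returned. Density comes from the undir.go changes further down in this diff; the import path and the seeded source are assumptions of the example.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/soniakeys/graph"
)

func main() {
	// Gilbert model G(n, p): 20 nodes, each possible edge kept with p = .1.
	r := rand.New(rand.NewSource(1))
	g, m := graph.GnpUndirected(20, .1, r)

	fmt.Println("edges generated:", m)
	fmt.Println("density:", graph.Density(g.Order(), m))
}
```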
// -// If Rand r is nil, the method creates a new source and generator for -// one-time use. +// If Rand r is nil, the rand package default shared source is used. // // Return value ma is the number of arcs retained in the result graph. -func KroneckerDirected(scale uint, arcFactor float64, r *rand.Rand) (g Directed, ma int) { - a, m := kronecker(scale, arcFactor, true, r) +func KroneckerDirected(scale uint, arcFactor float64, rr *rand.Rand) (g Directed, ma int) { + a, m := kronecker(scale, arcFactor, true, rr) return Directed{a}, m } @@ -240,15 +622,15 @@ func KroneckerDirected(scale uint, arcFactor float64, r *rand.Rand) (g Directed, // necessarily fully connected. The number of of nodes will be <= 2^scale, // and will be near 2^scale for typical values of edgeFactor, >= 2. // EdgeFactor * 2^scale edges are generated, although loops and duplicate edges -// are rejected. +// are rejected. In the arc list for each node, to-nodes are in random +// order. // -// If Rand r is nil, the method creates a new source and generator for -// one-time use. +// If Rand r is nil, the rand package default shared source is used. // // Return value m is the true number of edges--not arcs--retained in the result // graph. -func KroneckerUndirected(scale uint, edgeFactor float64, r *rand.Rand) (g Undirected, m int) { - al, s := kronecker(scale, edgeFactor, false, r) +func KroneckerUndirected(scale uint, edgeFactor float64, rr *rand.Rand) (g Undirected, m int) { + al, s := kronecker(scale, edgeFactor, false, rr) return Undirected{al}, s } @@ -257,28 +639,29 @@ func KroneckerUndirected(scale uint, edgeFactor float64, r *rand.Rand) (g Undire // here is meaningful or not. // // note mma returns arc size ma for dir=true, but returns size m for dir=false -func kronecker(scale uint, edgeFactor float64, dir bool, r *rand.Rand) (g AdjacencyList, mma int) { - if r == nil { - r = rand.New(rand.NewSource(time.Now().UnixNano())) +func kronecker(scale uint, edgeFactor float64, dir bool, rr *rand.Rand) (g AdjacencyList, mma int) { + rf, ri, rp := rand.Float64, rand.Intn, rand.Perm + if rr != nil { + rf, ri, rp = rr.Float64, rr.Intn, rr.Perm } - N := NI(1 << scale) // node extent + N := 1 << scale // node extent M := int(edgeFactor*float64(N) + .5) // number of arcs/edges to generate a, b, c := 0.57, 0.19, 0.19 // initiator probabilities ab := a + b cNorm := c / (1 - ab) aNorm := a / ab ij := make([][2]NI, M) - var bm Bits + bm := bits.New(N) var nNodes int for k := range ij { - var i, j NI - for b := NI(1); b < N; b <<= 1 { - if r.Float64() > ab { + var i, j int + for b := 1; b < N; b <<= 1 { + if rf() > ab { i |= b - if r.Float64() > cNorm { + if rf() > cNorm { j |= b } - } else if r.Float64() > aNorm { + } else if rf() > aNorm { j |= b } } @@ -290,15 +673,15 @@ func kronecker(scale uint, edgeFactor float64, dir bool, r *rand.Rand) (g Adjace bm.SetBit(j, 1) nNodes++ } - r := r.Intn(k + 1) // shuffle edges as they are generated + r := ri(k + 1) // shuffle edges as they are generated ij[k] = ij[r] - ij[r] = [2]NI{i, j} + ij[r] = [2]NI{NI(i), NI(j)} } - p := r.Perm(nNodes) // mapping to shuffle IDs of non-isolated nodes + p := rp(nNodes) // mapping to shuffle IDs of non-isolated nodes px := 0 rn := make([]NI, N) for i := range rn { - if bm.Bit(NI(i)) == 1 { + if bm.Bit(i) == 1 { rn[i] = NI(p[px]) // fill lookup table px++ } diff --git a/vendor/github.com/soniakeys/graph/readme.adoc b/vendor/github.com/soniakeys/graph/readme.adoc new file mode 100644 index 00000000..df771a5b --- /dev/null +++ 
b/vendor/github.com/soniakeys/graph/readme.adoc @@ -0,0 +1,50 @@ += Graph + +A graph library with goals of speed and simplicity, Graph implements +graph algorithms on graphs of zero-based integer node IDs. + +image:https://godoc.org/github.com/soniakeys/graph?status.svg[link=https://godoc.org/github.com/soniakeys/graph] +image:http://gowalker.org/api/v1/badge[link=https://gowalker.org/github.com/soniakeys/graph] +image:http://go-search.org/badge?id=github.com%2Fsoniakeys%2Fgraph[link=http://go-search.org/view?id=github.com%2Fsoniakeys%2Fgraph] +image:https://travis-ci.org/soniakeys/graph.svg?branch=master[link=https://travis-ci.org/soniakeys/graph] + +The library provides efficient graph representations and many methods on +graph types. It can be imported and used directly in many applications that +require or can benefit from graph algorithms. + +The library should also be considered as library of source code that can serve +as starting material for coding variant or more complex algorithms. + +== Ancillary material of interest + +The directory link:tutorials[tutorials] is a work in progress - there are only +a few tutorials there yet - but the concept is to provide some topical +walk-throughs to supplement godoc. The source-based godoc documentation +remains the primary documentation. + +The directory link:anecdote[anecdote] contains a stand-alone program that +performs single runs of a number of methods, collecting one-off or "anecdotal" +timings. It currently runs only a small fraction of the library methods but +may still be of interest for giving a general idea of how fast some methods +run. + +The directory link:bench[bench] is another work in progress. The concept is +to present some plots showing benchmark performance approaching some +theoretical asymptote. + +link:hacking.adoc[hacking.adoc] has some information about how the library is +developed, built, and tested. It might be of interest if for example you +plan to fork or contribute to the the repository. + +== Test coverage +1 Jul 2017 +.... +graph 93.7% +graph/alt 88.0% +graph/dot 77.7% +graph/treevis 79.4% +.... + +== License +All files in the repository are licensed with the MIT License, +https://opensource.org/licenses/MIT. diff --git a/vendor/github.com/soniakeys/graph/readme.md b/vendor/github.com/soniakeys/graph/readme.md deleted file mode 100644 index 539670ff..00000000 --- a/vendor/github.com/soniakeys/graph/readme.md +++ /dev/null @@ -1,38 +0,0 @@ -#Graph - -A graph library with goals of speed and simplicity, Graph implements -graph algorithms on graphs of zero-based integer node IDs. - -[![GoDoc](https://godoc.org/github.com/soniakeys/graph?status.svg)](https://godoc.org/github.com/soniakeys/graph) [![Go Walker](http://gowalker.org/api/v1/badge)](https://gowalker.org/github.com/soniakeys/graph) [![GoSearch](http://go-search.org/badge?id=github.com%2Fsoniakeys%2Fgraph)](http://go-search.org/view?id=github.com%2Fsoniakeys%2Fgraph)[![Build Status](https://travis-ci.org/soniakeys/graph.svg?branch=master)](https://travis-ci.org/soniakeys/graph) - -Status, 4 Apr 2016: The repo has benefitted recently from being included -in another package. In response to users of that package, this repo now -builds for 32 bit Windows and ARM, and for Go versions back to 1.2.1. -Thank you all who have filed issues. 
- -###Non-source files of interest - -The directory [tutorials](tutorials) is a work in progress - there are only -a couple of tutorials there yet - but the concept is to provide some topical -walk-throughs to supplement godoc. The source-based godoc documentation -remains the primary documentation. - -* [Dijkstra's algorithm](tutorials/dijkstra.md) -* [AdjacencyList types](tutorials/adjacencylist.md) - -The directory [bench](bench) is another work in progress. The concept is -to present some plots showing benchmark performance approaching some -theoretical asymptote. - -[hacking.md](hacking.md) has some information about how the library is -developed, built, and tested. It might be of interest if for example you -plan to fork or contribute to the the repository. - -###Test coverage -8 Apr 2016 -``` -graph 95.3% -graph/df 20.7% -graph/dot 77.5% -graph/treevis 79.4% -``` diff --git a/vendor/github.com/soniakeys/graph/sssp.go b/vendor/github.com/soniakeys/graph/sssp.go index 32cc192e..88e9d517 100644 --- a/vendor/github.com/soniakeys/graph/sssp.go +++ b/vendor/github.com/soniakeys/graph/sssp.go @@ -7,6 +7,8 @@ import ( "container/heap" "fmt" "math" + + "github.com/soniakeys/bits" ) // rNode holds data for a "reached" node @@ -60,7 +62,7 @@ func (h Heuristic) Admissible(g LabeledAdjacencyList, w WeightFunc, end NI) (boo // run dijkstra // Dijkstra.AllPaths takes a start node but after inverting the graph // argument end now represents the start node of the inverted graph. - f, dist, _ := inv.Dijkstra(end, -1, w) + f, _, dist, _ := inv.Dijkstra(end, -1, w) // compare h to found shortest paths for n := range inv { if f.Paths[n].Len == 0 { @@ -204,9 +206,9 @@ func (g LabeledAdjacencyList) AStarA(w WeightFunc, start, end NI, h Heuristic) ( // // If a path is found, the non-nil node path is returned with the total path // distance. Otherwise the returned path will be nil. -func (g LabeledAdjacencyList) AStarAPath(start, end NI, h Heuristic, w WeightFunc) ([]NI, float64) { - f, _, d, _ := g.AStarA(w, start, end, h) - return f.PathTo(end, nil), d +func (g LabeledAdjacencyList) AStarAPath(start, end NI, h Heuristic, w WeightFunc) (LabeledPath, float64) { + f, labels, d, _ := g.AStarA(w, start, end, h) + return f.PathToLabeled(end, labels, nil), d } // AStarM is AStarA optimized for monotonic heuristic estimates. @@ -305,9 +307,9 @@ func (g LabeledAdjacencyList) AStarM(w WeightFunc, start, end NI, h Heuristic) ( // // If a path is found, the non-nil node path is returned with the total path // distance. Otherwise the returned path will be nil. -func (g LabeledAdjacencyList) AStarMPath(start, end NI, h Heuristic, w WeightFunc) ([]NI, float64) { - f, _, d, _ := g.AStarM(w, start, end, h) - return f.PathTo(end, nil), d +func (g LabeledAdjacencyList) AStarMPath(start, end NI, h Heuristic, w WeightFunc) (LabeledPath, float64) { + f, labels, d, _ := g.AStarM(w, start, end, h) + return f.PathToLabeled(end, labels, nil), d } // implement container/heap @@ -342,8 +344,8 @@ func (p *openHeap) Pop() interface{} { // Loops and parallel arcs are allowed. // // If the algorithm completes without encountering a negative cycle the method -// returns shortest paths encoded in a FromList, path distances indexed by -// node, and return value end = -1. +// returns shortest paths encoded in a FromList, labels and path distances +// indexed by node, and return value end = -1. // // If it encounters a negative cycle reachable from start it returns end >= 0. 
// In this case the cycle can be obtained by calling f.BellmanFordCycle(end). @@ -352,11 +354,13 @@ func (p *openHeap) Pop() interface{} { // cycle not reachable from start will not prevent the algorithm from finding // shortest paths from start. // -// See also NegativeCycle to find a cycle anywhere in the graph, and see +// See also NegativeCycle to find a cycle anywhere in the graph, see +// NegativeCycles for enumerating all negative cycles, and see // HasNegativeCycle for lighter-weight negative cycle detection, -func (g LabeledDirected) BellmanFord(w WeightFunc, start NI) (f FromList, dist []float64, end NI) { +func (g LabeledDirected) BellmanFord(w WeightFunc, start NI) (f FromList, labels []LI, dist []float64, end NI) { a := g.LabeledAdjacencyList f = NewFromList(len(a)) + labels = make([]LI, len(a)) dist = make([]float64, len(a)) inf := math.Inf(1) for i := range dist { @@ -376,6 +380,7 @@ func (g LabeledDirected) BellmanFord(w WeightFunc, start NI) (f FromList, dist [ // TODO improve to break ties if fp.Len > 0 && d2 < dist[nb.To] { *to = PathEnd{From: NI(from), Len: fp.Len + 1} + labels[nb.To] = nb.Label dist[nb.To] = d2 imp = true } @@ -390,11 +395,11 @@ func (g LabeledDirected) BellmanFord(w WeightFunc, start NI) (f FromList, dist [ for _, nb := range nbs { if d1+w(nb.Label) < dist[nb.To] { // return nb as end of a path with negative cycle at root - return f, dist, NI(from) + return f, labels, dist, NI(from) } } } - return f, dist, -1 + return f, labels, dist, -1 } // BellmanFordCycle decodes a negative cycle detected by BellmanFord. @@ -402,14 +407,14 @@ func (g LabeledDirected) BellmanFord(w WeightFunc, start NI) (f FromList, dist [ // Receiver f and argument end must be results returned from BellmanFord. func (f FromList) BellmanFordCycle(end NI) (c []NI) { p := f.Paths - var b Bits - for b.Bit(end) == 0 { - b.SetBit(end, 1) + b := bits.New(len(p)) + for b.Bit(int(end)) == 0 { + b.SetBit(int(end), 1) end = p[end].From } - for b.Bit(end) == 1 { + for b.Bit(int(end)) == 1 { c = append(c, end) - b.SetBit(end, 0) + b.SetBit(int(end), 0) end = p[end].From } for i, j := 0, len(c)-1; i < j; i, j = i+1, j-1 { @@ -424,8 +429,9 @@ func (f FromList) BellmanFordCycle(end NI) (c []NI) { // cycles anywhere in the graph. Also path information is not computed, // reducing memory use somewhat compared to BellmanFord. // -// See also NegativeCycle to obtain the cycle, and see BellmanFord for -// single source shortest path searches. +// See also NegativeCycle to obtain the cycle, see NegativeCycles for +// enumerating all negative cycles, and see BellmanFord for single source +// shortest path searches with negative cycle detection. func (g LabeledDirected) HasNegativeCycle(w WeightFunc) bool { a := g.LabeledAdjacencyList dist := make([]float64, len(a)) @@ -462,15 +468,18 @@ func (g LabeledDirected) HasNegativeCycle(w WeightFunc) bool { // cycles anywhere in the graph. If a negative cycle exists, one will be // returned. The result is nil if no negative cycle exists. // -// See also HasNegativeCycle for lighter-weight cycle detection, and see -// BellmanFord for single source shortest paths. -func (g LabeledDirected) NegativeCycle(w WeightFunc) (c []NI) { +// See also NegativeCycles for enumerating all negative cycles, see +// HasNegativeCycle for lighter-weight cycle detection, and see +// BellmanFord for single source shortest paths, also with negative cycle +// detection. 
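A sketch of the revised BellmanFord call, which now also returns the labels of the arcs followed to each node. Assumptions beyond what the diff shows: WeightFunc is taken to be a function from an arc label LI to float64 (consistent with the w(nb.Label) calls above), and Half literals are written with fields To and Label; the graph and weights are example data.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// A small labeled directed graph; each arc label indexes a weight table.
	g := graph.LabeledDirected{LabeledAdjacencyList: graph.LabeledAdjacencyList{
		0: {{To: 1, Label: 0}, {To: 2, Label: 1}},
		1: {{To: 2, Label: 2}},
		2: {},
	}}
	weights := []float64{7, 9, -2}
	w := func(l graph.LI) float64 { return weights[l] }

	f, labels, dist, end := g.BellmanFord(w, 0)
	if end >= 0 {
		// A negative cycle reachable from the start node was detected.
		fmt.Println("negative cycle:", f.BellmanFordCycle(end))
		return
	}
	fmt.Println("distance to node 2:", dist[2])
	fmt.Println("label of final arc into node 2:", labels[2])
}
```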
+func (g LabeledDirected) NegativeCycle(w WeightFunc) (c []Half) { a := g.LabeledAdjacencyList f := NewFromList(len(a)) p := f.Paths for n := range p { p[n] = PathEnd{From: -1, Len: 1} } + labels := make([]LI, len(a)) dist := make([]float64, len(a)) for _ = range a { imp := false @@ -482,6 +491,7 @@ func (g LabeledDirected) NegativeCycle(w WeightFunc) (c []NI) { to := &p[nb.To] if fp.Len > 0 && d2 < dist[nb.To] { *to = PathEnd{From: NI(from), Len: fp.Len + 1} + labels[nb.To] = nb.Label dist[nb.To] = d2 imp = true } @@ -491,26 +501,26 @@ func (g LabeledDirected) NegativeCycle(w WeightFunc) (c []NI) { return nil } } - var vis Bits + vis := bits.New(len(a)) a: for n := range a { - end := NI(n) - var b Bits + end := n + b := bits.New(len(a)) for b.Bit(end) == 0 { if vis.Bit(end) == 1 { continue a } vis.SetBit(end, 1) b.SetBit(end, 1) - end = p[end].From + end = int(p[end].From) if end < 0 { continue a } } for b.Bit(end) == 1 { - c = append(c, end) + c = append(c, Half{NI(end), labels[end]}) b.SetBit(end, 0) - end = p[end].From + end = int(p[end].From) } for i, j := 0, len(c)-1; i < j; i, j = i+1, j-1 { c[i], c[j] = c[j], c[i] @@ -520,147 +530,6 @@ a: return nil // no negative cycle } -// A NodeVisitor is an argument to some graph traversal methods. -// -// Graph traversal methods call the visitor function for each node visited. -// Argument n is the node being visited. -type NodeVisitor func(n NI) - -// An OkNodeVisitor function is an argument to some graph traversal methods. -// -// Graph traversal methods call the visitor function for each node visited. -// The argument n is the node being visited. If the visitor function -// returns true, the traversal will continue. If the visitor function -// returns false, the traversal will terminate immediately. -type OkNodeVisitor func(n NI) (ok bool) - -// BreadthFirst2 traverses a graph breadth first using a direction -// optimizing algorithm. -// -// The code is experimental and currently seems no faster than the -// conventional breadth first code. -// -// Use AdjacencyList.BreadthFirst instead. -func BreadthFirst2(g, tr AdjacencyList, ma int, start NI, f *FromList, v OkNodeVisitor) int { - if tr == nil { - var d Directed - d, ma = Directed{g}.Transpose() - tr = d.AdjacencyList - } - switch { - case f == nil: - e := NewFromList(len(g)) - f = &e - case f.Paths == nil: - *f = NewFromList(len(g)) - } - if ma <= 0 { - ma = g.ArcSize() - } - rp := f.Paths - level := 1 - rp[start] = PathEnd{Len: level, From: -1} - if !v(start) { - f.MaxLen = level - return -1 - } - nReached := 1 // accumulated for a return value - // the frontier consists of nodes all at the same level - frontier := []NI{start} - mf := len(g[start]) // number of arcs leading out from frontier - ctb := ma / 10 // threshold change from top-down to bottom-up - k14 := 14 * ma / len(g) // 14 * mean degree - cbt := len(g) / k14 // threshold change from bottom-up to top-down - // var fBits, nextb big.Int - fBits := make([]bool, len(g)) - nextb := make([]bool, len(g)) - zBits := make([]bool, len(g)) - for { - // top down step - level++ - var next []NI - for _, n := range frontier { - for _, nb := range g[n] { - if rp[nb].Len == 0 { - rp[nb] = PathEnd{From: n, Len: level} - if !v(nb) { - f.MaxLen = level - return -1 - } - next = append(next, nb) - nReached++ - } - } - } - if len(next) == 0 { - break - } - frontier = next - if mf > ctb { - // switch to bottom up! 
- } else { - // stick with top down - continue - } - // convert frontier representation - nf := 0 // number of vertices on the frontier - for _, n := range frontier { - // fBits.SetBit(&fBits, n, 1) - fBits[n] = true - nf++ - } - bottomUpLoop: - level++ - nNext := 0 - for n := range tr { - if rp[n].Len == 0 { - for _, nb := range tr[n] { - // if fBits.Bit(nb) == 1 { - if fBits[nb] { - rp[n] = PathEnd{From: nb, Len: level} - if !v(nb) { - f.MaxLen = level - return -1 - } - // nextb.SetBit(&nextb, n, 1) - nextb[n] = true - nReached++ - nNext++ - break - } - } - } - } - if nNext == 0 { - break - } - fBits, nextb = nextb, fBits - // nextb.SetInt64(0) - copy(nextb, zBits) - nf = nNext - if nf < cbt { - // switch back to top down! - } else { - // stick with bottom up - goto bottomUpLoop - } - // convert frontier representation - mf = 0 - frontier = frontier[:0] - for n := range g { - // if fBits.Bit(n) == 1 { - if fBits[n] { - frontier = append(frontier, NI(n)) - mf += len(g[n]) - fBits[n] = false - } - } - // fBits.SetInt64(0) - } - f.MaxLen = level - 1 - return nReached -} - // DAGMinDistPath finds a single shortest path. // // Shortest means minimum sum of arc weights. @@ -668,7 +537,7 @@ func BreadthFirst2(g, tr AdjacencyList, ma int, start NI, f *FromList, v OkNodeV // Returned is the path and distance as returned by FromList.PathTo. // // This is a convenience method. See DAGOptimalPaths for more options. -func (g LabeledDirected) DAGMinDistPath(start, end NI, w WeightFunc) ([]NI, float64, error) { +func (g LabeledDirected) DAGMinDistPath(start, end NI, w WeightFunc) (LabeledPath, float64, error) { return g.dagPath(start, end, w, false) } @@ -679,20 +548,20 @@ func (g LabeledDirected) DAGMinDistPath(start, end NI, w WeightFunc) ([]NI, floa // Returned is the path and distance as returned by FromList.PathTo. // // This is a convenience method. See DAGOptimalPaths for more options. -func (g LabeledDirected) DAGMaxDistPath(start, end NI, w WeightFunc) ([]NI, float64, error) { +func (g LabeledDirected) DAGMaxDistPath(start, end NI, w WeightFunc) (LabeledPath, float64, error) { return g.dagPath(start, end, w, true) } -func (g LabeledDirected) dagPath(start, end NI, w WeightFunc, longest bool) ([]NI, float64, error) { +func (g LabeledDirected) dagPath(start, end NI, w WeightFunc, longest bool) (LabeledPath, float64, error) { o, _ := g.Topological() if o == nil { - return nil, 0, fmt.Errorf("not a DAG") + return LabeledPath{}, 0, fmt.Errorf("not a DAG") } - f, dist, _ := g.DAGOptimalPaths(start, end, o, w, longest) + f, labels, dist, _ := g.DAGOptimalPaths(start, end, o, w, longest) if f.Paths[end].Len == 0 { - return nil, 0, fmt.Errorf("no path from %d to %d", start, end) + return LabeledPath{}, 0, fmt.Errorf("no path from %d to %d", start, end) } - return f.PathTo(end, nil), dist[end], nil + return f.PathToLabeled(end, labels, nil), dist[end], nil } // DAGOptimalPaths finds either longest or shortest distance paths in a @@ -712,11 +581,13 @@ func (g LabeledDirected) dagPath(start, end NI, w WeightFunc, longest bool) ([]N // is a valid node number, the method returns as soon as the optimal path // to end is found. If end is -1, all optimal paths from start are found. // -// Paths and path distances are encoded in the returned FromList and dist -// slice. The number of nodes reached is returned as nReached. 
-func (g LabeledDirected) DAGOptimalPaths(start, end NI, ordering []NI, w WeightFunc, longest bool) (f FromList, dist []float64, nReached int) { +// Paths and path distances are encoded in the returned FromList, labels, +// and dist slices. The number of nodes reached is returned as nReached. +func (g LabeledDirected) DAGOptimalPaths(start, end NI, ordering []NI, w WeightFunc, longest bool) (f FromList, labels []LI, dist []float64, nReached int) { a := g.LabeledAdjacencyList f = NewFromList(len(a)) + f.Leaves = bits.New(len(a)) + labels = make([]LI, len(a)) dist = make([]float64, len(a)) if ordering == nil { ordering, _ = g.Topological() @@ -739,14 +610,14 @@ func (g LabeledDirected) DAGOptimalPaths(start, end NI, ordering []NI, w WeightF p[start] = PathEnd{From: -1, Len: 1} f.MaxLen = 1 leaves := &f.Leaves - leaves.SetBit(start, 1) + leaves.SetBit(int(start), 1) nReached = 1 for n := start; n != end; n = ordering[o] { if p[n].Len > 0 && len(a[n]) > 0 { nDist := dist[n] candLen := p[n].Len + 1 // len for any candidate arc followed from n for _, to := range a[n] { - leaves.SetBit(to.To, 1) + leaves.SetBit(int(to.To), 1) candDist := nDist + w(to.Label) switch { case p[to.To].Len == 0: // first path to node to.To @@ -758,11 +629,12 @@ func (g LabeledDirected) DAGOptimalPaths(start, end NI, ordering []NI, w WeightF } dist[to.To] = candDist p[to.To] = PathEnd{From: n, Len: candLen} + labels[to.To] = to.Label if candLen > f.MaxLen { f.MaxLen = candLen } } - leaves.SetBit(n, 0) + leaves.SetBit(int(n), 0) } o++ if o == len(ordering) { @@ -781,12 +653,17 @@ func (g LabeledDirected) DAGOptimalPaths(start, end NI, ordering []NI, w WeightF // As usual for Dijkstra's algorithm, arc weights must be non-negative. // Graphs may be directed or undirected. Loops and parallel arcs are // allowed. -func (g LabeledAdjacencyList) Dijkstra(start, end NI, w WeightFunc) (f FromList, dist []float64, reached int) { +// +// Paths and path distances are encoded in the returned FromList and dist +// slice. Returned labels are the labels of arcs followed to each node. +// The number of nodes reached is returned as nReached. +func (g LabeledAdjacencyList) Dijkstra(start, end NI, w WeightFunc) (f FromList, labels []LI, dist []float64, nReached int) { r := make([]tentResult, len(g)) for i := range r { r[i].nx = NI(i) } f = NewFromList(len(g)) + labels = make([]LI, len(g)) dist = make([]float64, len(g)) current := start rp := f.Paths @@ -822,6 +699,7 @@ func (g LabeledAdjacencyList) Dijkstra(start, end NI, w WeightFunc) (f FromList, hr.dist = dist rp[nb.To].Len = nextLen rp[nb.To].From = current + labels[nb.To] = nb.Label if visited { heap.Fix(&t, hr.fx) } else { @@ -830,7 +708,8 @@ func (g LabeledAdjacencyList) Dijkstra(start, end NI, w WeightFunc) (f FromList, } //d.ndVis++ if len(t) == 0 { - return f, dist, nDone // no more reachable nodes. AllPaths normal return + // no more reachable nodes. AllPaths normal return + return f, labels, dist, nDone } // new current is node with smallest tentative distance cr = heap.Pop(&t).(*tentResult) @@ -840,15 +719,16 @@ func (g LabeledAdjacencyList) Dijkstra(start, end NI, w WeightFunc) (f FromList, dist[current] = cr.dist // store final distance } // normal return for single shortest path search - return f, dist, -1 + return f, labels, dist, -1 } // DijkstraPath finds a single shortest path. // -// Returned is the path and distance as returned by FromList.PathTo. 
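The same label convention illustrated in the BellmanFord sketch applies to the reworked Dijkstra, which now returns the arc labels alongside the FromList and distances; PathToLabeled then reassembles a LabeledPath, as DijkstraPath does below. The weight table and graph are made-up example data.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// Arc labels index a weight table, as in the previous sketch.
	g := graph.LabeledAdjacencyList{
		0: {{To: 1, Label: 0}, {To: 2, Label: 1}},
		1: {{To: 3, Label: 2}},
		2: {{To: 3, Label: 3}},
		3: {},
	}
	weights := []float64{1, 4, 1, 1}
	w := func(l graph.LI) float64 { return weights[l] }

	// Dijkstra now also returns the labels of the arcs followed to each node.
	f, labels, dist, _ := g.Dijkstra(0, 3, w)
	fmt.Println("distance 0->3:", dist[3])
	fmt.Println("labeled path:", f.PathToLabeled(3, labels, nil))
}
```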
-func (g LabeledAdjacencyList) DijkstraPath(start, end NI, w WeightFunc) ([]NI, float64) { - f, dist, _ := g.Dijkstra(start, end, w) - return f.PathTo(end, nil), dist[end] +// Returned is the path as returned by FromList.LabeledPathTo and the total +// path distance. +func (g LabeledAdjacencyList) DijkstraPath(start, end NI, w WeightFunc) (LabeledPath, float64) { + f, labels, dist, _ := g.Dijkstra(start, end, w) + return f.PathToLabeled(end, labels, nil), dist[end] } // tent implements container/heap diff --git a/vendor/github.com/soniakeys/graph/travis.sh b/vendor/github.com/soniakeys/graph/travis.sh deleted file mode 100644 index 5a8030ac..00000000 --- a/vendor/github.com/soniakeys/graph/travis.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -set -ex -go test ./... -if [ "$TRAVIS_GO_VERSION" = "1.6" ]; then - GOARCH=386 go test ./... - go tool vet -example . - go get github.com/client9/misspell/cmd/misspell - go get github.com/soniakeys/vetc - misspell -error * */* */*/* - vetc -fi diff --git a/vendor/github.com/soniakeys/graph/undir.go b/vendor/github.com/soniakeys/graph/undir.go index 75a7f248..21ef7b59 100644 --- a/vendor/github.com/soniakeys/graph/undir.go +++ b/vendor/github.com/soniakeys/graph/undir.go @@ -6,7 +6,11 @@ package graph // undir.go has methods specific to undirected graphs, Undirected and // LabeledUndirected. -import "errors" +import ( + "fmt" + + "github.com/soniakeys/bits" +) // AddEdge adds an edge to a graph. // @@ -18,6 +22,10 @@ import "errors" // The pointer receiver allows the method to expand the graph as needed // to include the values n1 and n2. If n1 or n2 happen to be greater than // len(*p) the method does not panic, but simply expands the graph. +// +// If you know or can compute the final graph order however, consider +// preallocating to avoid any overhead of expanding the graph. +// See second example, "More". func (p *Undirected) AddEdge(n1, n2 NI) { // Similar code in LabeledAdjacencyList.AddEdge. @@ -41,34 +49,238 @@ func (p *Undirected) AddEdge(n1, n2 NI) { } } -// EulerianCycleD for undirected graphs is a bit of an experiment. -// -// It is about the same as the directed version, but modified for an undirected -// multigraph. -// -// Parameter m in this case must be the size of the undirected graph -- the -// number of edges. Use Undirected.Size if the size is unknown. -// -// It works, but contains an extra loop that I think spoils the time -// complexity. Probably still pretty fast in practice, but a different -// graph representation might be better. -func (g Undirected) EulerianCycleD(m int) ([]NI, error) { - if len(g.AdjacencyList) == 0 { - return nil, nil - } - e := newEulerian(g.AdjacencyList, m) - for e.s >= 0 { - v := e.top() - e.pushUndir() // call modified method - if e.top() != v { - return nil, errors.New("not balanced") +// RemoveEdge removes a single edge between nodes n1 and n2. +// +// It removes reciprocal arcs in the case of distinct n1 and n2 or removes +// a single arc loop in the case of n1 == n2. +// +// Returns true if the specified edge is found and successfully removed, +// false if the edge does not exist. +func (g Undirected) RemoveEdge(n1, n2 NI) (ok bool) { + ok, x1, x2 := g.HasEdge(n1, n2) + if !ok { + return + } + a := g.AdjacencyList + to := a[n1] + last := len(to) - 1 + to[x1] = to[last] + a[n1] = to[:last] + if n1 == n2 { + return + } + to = a[n2] + last = len(to) - 1 + to[x2] = to[last] + a[n2] = to[:last] + return +} + +// ArcDensity returns density for a simple directed graph. 
+// +// Parameter n is order, or number of nodes of a simple directed graph. +// Parameter a is the arc size, or number of directed arcs. +// +// Returned density is the fraction `a` over the total possible number of arcs +// or a / (n * (n-1)). +// +// See also Density for density of a simple undirected graph. +// +// See also the corresponding methods AdjacencyList.ArcDensity and +// LabeledAdjacencyList.ArcDensity. +func ArcDensity(n, a int) float64 { + return float64(a) / (float64(n) * float64(n-1)) +} + +// Density returns density for a simple undirected graph. +// +// Parameter n is order, or number of nodes of a simple undirected graph. +// Parameter m is the size, or number of undirected edges. +// +// Returned density is the fraction m over the total possible number of edges +// or m / ((n * (n-1))/2). +// +// See also ArcDensity for simple directed graphs. +// +// See also the corresponding methods AdjacencyList.Density and +// LabeledAdjacencyList.Density. +func Density(n, m int) float64 { + return float64(m) * 2 / (float64(n) * float64(n-1)) +} + +// An EdgeVisitor is an argument to some traversal methods. +// +// Traversal methods call the visitor function for each edge visited. +// Argument e is the edge being visited. +type EdgeVisitor func(e Edge) + +// Edges iterates over the edges of an undirected graph. +// +// Edge visitor v is called for each edge of the graph. That is, it is called +// once for each reciprocal arc pair and once for each loop. +// +// See also LabeledUndirected.Edges for a labeled version. +// See also Undirected.SimpleEdges for a version that emits only the simple +// subgraph. +func (g Undirected) Edges(v EdgeVisitor) { + a := g.AdjacencyList + unpaired := make(AdjacencyList, len(a)) + for fr, to := range a { + arc: // for each arc in a + for _, to := range to { + if to == NI(fr) { + v(Edge{NI(fr), to}) // output loop + continue + } + // search unpaired arcs + ut := unpaired[to] + for i, u := range ut { + if u == NI(fr) { // found reciprocal + v(Edge{u, to}) // output edge + last := len(ut) - 1 + ut[i] = ut[last] + unpaired[to] = ut[:last] + continue arc + } + } + // reciprocal not found + unpaired[fr] = append(unpaired[fr], to) + } + } + // undefined behavior is that unpaired arcs are silently ignored. +} + +// FromList builds a forest with a tree spanning each connected component. +// +// For each component a root is chosen and spanning is done with the method +// Undirected.SpanTree, and so is breadth-first. Returned is a FromList with +// all spanned trees, a list of roots chosen, and a bool indicating if the +// receiver graph g was found to be a simple graph connected as a forest. +// Any cycles, loops, or parallel edges in any component will cause +// simpleForest to be false, but FromList f will still be populated with +// a valid and complete spanning forest. +func (g Undirected) FromList() (f FromList, roots []NI, simpleForest bool) { + p := make([]PathEnd, g.Order()) + for i := range p { + p[i].From = -1 + } + f.Paths = p + simpleForest = true + ts := 0 + for n := range g.AdjacencyList { + if p[n].From >= 0 { + continue } - e.keep() + roots = append(roots, NI(n)) + ns, st := g.SpanTree(NI(n), &f) + if !st { + simpleForest = false + } + ts += ns + if ts == len(p) { + break + } + } + return +} + +// HasEdge returns true if g has any edge between nodes n1 and n2. +// +// Also returned are indexes x1 and x2 such that g[n1][x1] == n2 +// and g[n2][x2] == n1. If no edge between n1 and n2 is present HasArc +// returns `has` == false. 
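A sketch of the new edge iteration on an unlabeled undirected graph. AddEdge is the method documented at the top of this file's diff; the loop on node 2 is included to show that Edges reports loops once, as the doc comment above states. The graph itself is invented example data.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	var g graph.Undirected
	g.AddEdge(0, 1)
	g.AddEdge(1, 2)
	g.AddEdge(2, 0)
	g.AddEdge(2, 2) // a loop

	// Edges visits each reciprocal arc pair once and each loop once.
	m := 0
	g.Edges(func(e graph.Edge) {
		m++
		fmt.Println(e)
	})
	fmt.Println("edges visited:", m)
}
```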
+// +// See also HasArc. If you are interested only in the boolean result and +// g is a well formed (passes IsUndirected) then HasArc is an adequate test. +func (g Undirected) HasEdge(n1, n2 NI) (has bool, x1, x2 int) { + if has, x1 = g.HasArc(n1, n2); !has { + return has, x1, x1 } - if !e.uv.Zero() { - return nil, errors.New("not strongly connected") + has, x2 = g.HasArc(n2, n1) + return +} + +// SimpleEdges iterates over the edges of the simple subgraph of an undirected +// graph. +// +// Edge visitor v is called for each pair of distinct nodes that is connected +// with an edge. That is, loops are ignored and parallel edges are reduced to +// a single edge. +// +// See also Undirected.Edges for a version that emits all edges. +func (g Undirected) SimpleEdges(v EdgeVisitor) { + for fr, to := range g.AdjacencyList { + e := bits.New(len(g.AdjacencyList)) + for _, to := range to { + if to > NI(fr) && e.Bit(int(to)) == 0 { + e.SetBit(int(to), 1) + v(Edge{NI(fr), to}) + } + } } - return e.p, nil + // undefined behavior is that unpaired arcs may or may not be emitted. +} + +// SpanTree builds a tree spanning a connected component. +// +// The component is spanned by breadth-first search from the given root. +// The resulting spanning tree in stored a FromList. +// +// If FromList.Paths is not the same length as g, it is allocated and +// initialized. This allows a zero value FromList to be passed as f. +// If FromList.Paths is the same length as g, it is used as is and is not +// reinitialized. This allows multiple trees to be spanned in the same +// FromList with successive calls. +// +// For nodes spanned, the Path member of the returned FromList is populated +// with both From and Len values. The MaxLen member will be updated but +// not Leaves. +// +// Returned is the number of nodes spanned, which will be the number of nodes +// in the component, and a bool indicating if the component was found to be a +// simply connected unrooted tree in the receiver graph g. Any cycles, loops, +// or parallel edges in the component will cause simpleTree to be false, but +// FromList f will still be populated with a valid and complete spanning tree. +func (g Undirected) SpanTree(root NI, f *FromList) (nSpanned int, simpleTree bool) { + a := g.AdjacencyList + p := f.Paths + if len(p) != len(a) { + p = make([]PathEnd, len(a)) + for i := range p { + p[i].From = -1 + } + f.Paths = p + } + simpleTree = true + p[root] = PathEnd{From: -1, Len: 1} + type arc struct { + from NI + half NI + } + var next []arc + frontier := []arc{{-1, root}} + for len(frontier) > 0 { + for _, fa := range frontier { // fa frontier arc + nSpanned++ + l := p[fa.half].Len + 1 + for _, to := range a[fa.half] { + if to == fa.from { + continue + } + if p[to].Len > 0 { + simpleTree = false + continue + } + p[to] = PathEnd{From: fa.half, Len: l} + if l > f.MaxLen { + f.MaxLen = l + } + next = append(next, arc{fa.half, to}) + } + } + frontier, next = next, frontier[:0] + } + return } // TarjanBiconnectedComponents decomposes a graph into maximal biconnected @@ -88,8 +300,9 @@ func (g Undirected) TarjanBiconnectedComponents(emit func([]Edge) bool) { // Note Tarjan's "adjacency structure" is graph.AdjacencyList, // His "adjacency list" is an element of a graph.AdjacencyList, also // termed a "to-list", "neighbor list", or "child list." 
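To make the emit contract of TarjanBiconnectedComponents concrete, here is a sketch enumerating the biconnected components of two triangles joined at a cut vertex; returning false from the callback stops the enumeration early. The graph data is invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// Two triangles sharing the cut vertex 2.
	var g graph.Undirected
	g.AddEdge(0, 1)
	g.AddEdge(1, 2)
	g.AddEdge(2, 0)
	g.AddEdge(2, 3)
	g.AddEdge(3, 4)
	g.AddEdge(4, 2)

	g.TarjanBiconnectedComponents(func(c []graph.Edge) bool {
		fmt.Println("biconnected component:", c)
		return true // return false to stop early
	})
}
```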
- number := make([]int, len(g.AdjacencyList)) - lowpt := make([]int, len(g.AdjacencyList)) + a := g.AdjacencyList + number := make([]int, len(a)) + lowpt := make([]int, len(a)) var stack []Edge var i int var biconnect func(NI, NI) bool @@ -97,7 +310,7 @@ func (g Undirected) TarjanBiconnectedComponents(emit func([]Edge) bool) { i++ number[v] = i lowpt[v] = i - for _, w := range g.AdjacencyList[v] { + for _, w := range a[v] { if number[w] == 0 { stack = append(stack, Edge{v, w}) if !biconnect(w, v) { @@ -130,92 +343,82 @@ func (g Undirected) TarjanBiconnectedComponents(emit func([]Edge) bool) { } return true } - for w := range g.AdjacencyList { - if number[w] == 0 && !biconnect(NI(w), 0) { + for w := range a { + if number[w] == 0 && !biconnect(NI(w), -1) { return } } } -/* half-baked. Read the 72 paper. Maybe revisit at some point. -type BiconnectedComponents struct { - Graph AdjacencyList - Start int - Cuts big.Int // bitmap of node cuts - From []int // from-tree - Leaves []int // leaves of from-tree -} - -func NewBiconnectedComponents(g Undirected) *BiconnectedComponents { - return &BiconnectedComponents{ - Graph: g, - From: make([]int, len(g)), - } -} - -func (b *BiconnectedComponents) Find(start int) { - g := b.Graph - depth := make([]int, len(g)) - low := make([]int, len(g)) - // reset from any previous run - b.Cuts.SetInt64(0) - bf := b.From - for n := range bf { - bf[n] = -1 - } - b.Leaves = b.Leaves[:0] - d := 1 // depth. d > 0 means visited - depth[start] = d - low[start] = d - d++ - var df func(int, int) - df = func(from, n int) { - bf[n] = from - depth[n] = d - dn := d - l := d - d++ - cut := false - leaf := true - for _, nb := range g[n] { - if depth[nb] == 0 { - leaf = false - df(n, nb) - if low[nb] < l { - l = low[nb] +func (g Undirected) BlockCut(block func([]Edge) bool, cut func(NI) bool, isolated func(NI) bool) { + a := g.AdjacencyList + number := make([]int, len(a)) + lowpt := make([]int, len(a)) + var stack []Edge + var i, rc int + var biconnect func(NI, NI) bool + biconnect = func(v, u NI) bool { + i++ + number[v] = i + lowpt[v] = i + for _, w := range a[v] { + if number[w] == 0 { + if u < 0 { + rc++ } - if low[nb] >= dn { - cut = true + stack = append(stack, Edge{v, w}) + if !biconnect(w, v) { + return false + } + if lowpt[w] < lowpt[v] { + lowpt[v] = lowpt[w] + } + if lowpt[w] >= number[v] { + if u >= 0 && !cut(v) { + return false + } + var bcc []Edge + top := len(stack) - 1 + for number[stack[top].N1] >= number[w] { + bcc = append(bcc, stack[top]) + stack = stack[:top] + top-- + } + bcc = append(bcc, stack[top]) + stack = stack[:top] + top-- + if !block(bcc) { + return false + } + } + } else if number[w] < number[v] && w != u { + stack = append(stack, Edge{v, w}) + if number[w] < lowpt[v] { + lowpt[v] = number[w] } - } else if nb != from && depth[nb] < l { - l = depth[nb] } } - low[n] = l - if cut { - b.Cuts.SetBit(&b.Cuts, n, 1) - } - if leaf { - b.Leaves = append(b.Leaves, n) + if u < 0 && rc > 1 { + return cut(v) } - d-- - } - nbs := g[start] - if len(nbs) == 0 { - return + return true } - df(start, nbs[0]) - var rc uint - for _, nb := range nbs[1:] { - if depth[nb] == 0 { - rc = 1 - df(start, nb) + for w := range a { + if number[w] > 0 { + continue + } + if len(a[w]) == 0 { + if !isolated(NI(w)) { + return + } + continue + } + rc = 0 + if !biconnect(NI(w), -1) { + return } } - b.Cuts.SetBit(&b.Cuts, start, rc) - return } -*/ // AddEdge adds an edge to a labeled graph. 
// @@ -252,6 +455,247 @@ func (p *LabeledUndirected) AddEdge(e Edge, l LI) { } } +// A LabeledEdgeVisitor is an argument to some traversal methods. +// +// Traversal methods call the visitor function for each edge visited. +// Argument e is the edge being visited. +type LabeledEdgeVisitor func(e LabeledEdge) + +// Edges iterates over the edges of a labeled undirected graph. +// +// Edge visitor v is called for each edge of the graph. That is, it is called +// once for each reciprocal arc pair and once for each loop. +// +// See also Undirected.Edges for an unlabeled version. +// See also the more simplistic LabeledAdjacencyList.ArcsAsEdges. +func (g LabeledUndirected) Edges(v LabeledEdgeVisitor) { + // similar code in LabeledAdjacencyList.InUndirected + a := g.LabeledAdjacencyList + unpaired := make(LabeledAdjacencyList, len(a)) + for fr, to := range a { + arc: // for each arc in a + for _, to := range to { + if to.To == NI(fr) { + v(LabeledEdge{Edge{NI(fr), to.To}, to.Label}) // output loop + continue + } + // search unpaired arcs + ut := unpaired[to.To] + for i, u := range ut { + if u.To == NI(fr) && u.Label == to.Label { // found reciprocal + v(LabeledEdge{Edge{NI(fr), to.To}, to.Label}) // output edge + last := len(ut) - 1 + ut[i] = ut[last] + unpaired[to.To] = ut[:last] + continue arc + } + } + // reciprocal not found + unpaired[fr] = append(unpaired[fr], to) + } + } +} + +// FromList builds a forest with a tree spanning each connected component in g. +// +// A root is chosen and spanning is done with the LabeledUndirected.SpanTree +// method, and so is breadth-first. Returned is a FromList with all spanned +// trees, labels corresponding to arcs in f, +// a list of roots chosen, and a bool indicating if the receiver graph g was +// found to be a simple graph connected as a forest. Any cycles, loops, or +// parallel edges in any component will cause simpleForest to be false, but +// FromList f will still be populated with a valid and complete spanning forest. + +// FromList builds a forest with a tree spanning each connected component. +// +// For each component a root is chosen and spanning is done with the method +// Undirected.SpanTree, and so is breadth-first. Returned is a FromList with +// all spanned trees, labels corresponding to arcs in f, a list of roots +// chosen, and a bool indicating if the receiver graph g was found to be a +// simple graph connected as a forest. Any cycles, loops, or parallel edges +// in any component will cause simpleForest to be false, but FromList f will +// still be populated with a valid and complete spanning forest. +func (g LabeledUndirected) FromList() (f FromList, labels []LI, roots []NI, simpleForest bool) { + p := make([]PathEnd, g.Order()) + for i := range p { + p[i].From = -1 + } + f.Paths = p + labels = make([]LI, len(p)) + simpleForest = true + ts := 0 + for n := range g.LabeledAdjacencyList { + if p[n].From >= 0 { + continue + } + roots = append(roots, NI(n)) + ns, st := g.SpanTree(NI(n), &f, labels) + if !st { + simpleForest = false + } + ts += ns + if ts == len(p) { + break + } + } + return +} + +// SpanTree builds a tree spanning a connected component. +// +// The component is spanned by breadth-first search from the given root. +// The resulting spanning tree in stored a FromList, and arc labels optionally +// stored in a slice. +// +// If FromList.Paths is not the same length as g, it is allocated and +// initialized. This allows a zero value FromList to be passed as f. 
+// If FromList.Paths is the same length as g, it is used as is and is not +// reinitialized. This allows multiple trees to be spanned in the same +// FromList with successive calls. +// +// For nodes spanned, the Path member of returned FromList f is populated +// populated with both From and Len values. The MaxLen member will be +// updated but not Leaves. +// +// The labels slice will be populated only if it is same length as g. +// Nil can be passed for example if labels are not needed. +// +// Returned is the number of nodes spanned, which will be the number of nodes +// in the component, and a bool indicating if the component was found to be a +// simply connected unrooted tree in the receiver graph g. Any cycles, loops, +// or parallel edges in the component will cause simpleTree to be false, but +// FromList f will still be populated with a valid and complete spanning tree. +func (g LabeledUndirected) SpanTree(root NI, f *FromList, labels []LI) (nSpanned int, simple bool) { + a := g.LabeledAdjacencyList + p := f.Paths + if len(p) != len(a) { + p = make([]PathEnd, len(a)) + for i := range p { + p[i].From = -1 + } + f.Paths = p + } + simple = true + p[root].Len = 1 + type arc struct { + from NI + half Half + } + var next []arc + frontier := []arc{{-1, Half{root, -1}}} + for len(frontier) > 0 { + for _, fa := range frontier { // fa frontier arc + nSpanned++ + l := p[fa.half.To].Len + 1 + for _, to := range a[fa.half.To] { + if to.To == fa.from && to.Label == fa.half.Label { + continue + } + if p[to.To].Len > 0 { + simple = false + continue + } + p[to.To] = PathEnd{From: fa.half.To, Len: l} + if len(labels) == len(p) { + labels[to.To] = to.Label + } + if l > f.MaxLen { + f.MaxLen = l + } + next = append(next, arc{fa.half.To, to}) + } + } + frontier, next = next, frontier[:0] + } + return +} + +// HasEdge returns true if g has any edge between nodes n1 and n2. +// +// Also returned are indexes x1 and x2 such that g[n1][x1] == Half{n2, l} +// and g[n2][x2] == {n1, l} for some label l. If no edge between n1 and n2 +// exists, HasArc returns `has` == false. +// +// See also HasArc. If you are only interested in the boolean result then +// HasArc is an adequate test. +func (g LabeledUndirected) HasEdge(n1, n2 NI) (has bool, x1, x2 int) { + if has, x1 = g.HasArc(n1, n2); !has { + return has, x1, x1 + } + has, x2 = g.HasArcLabel(n2, n1, g.LabeledAdjacencyList[n1][x1].Label) + return +} + +// HasEdgeLabel returns true if g has any edge between nodes n1 and n2 with +// label l. +// +// Also returned are indexes x1 and x2 such that g[n1][x1] == Half{n2, l} +// and g[n2][x2] == Half{n1, l}. If no edge between n1 and n2 with label l +// is present HasArc returns `has` == false. +func (g LabeledUndirected) HasEdgeLabel(n1, n2 NI, l LI) (has bool, x1, x2 int) { + if has, x1 = g.HasArcLabel(n1, n2, l); !has { + return has, x1, x1 + } + has, x2 = g.HasArcLabel(n2, n1, l) + return +} + +// RemoveEdge removes a single edge between nodes n1 and n2. +// +// It removes reciprocal arcs in the case of distinct n1 and n2 or removes +// a single arc loop in the case of n1 == n2. +// +// If the specified edge is found and successfully removed, RemoveEdge returns +// true and the label of the edge removed. If no edge exists between n1 and n2, +// RemoveEdge returns false, 0. 
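A sketch of the labeled edge helpers added above. It assumes Edge is constructed positionally as {n1, n2}, matching how the library writes Edge literals elsewhere in this diff, and that the labeled AddEdge expands the graph as needed like its unlabeled counterpart.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	var g graph.LabeledUndirected
	g.AddEdge(graph.Edge{0, 1}, 10)
	g.AddEdge(graph.Edge{1, 2}, 20)

	if ok, x1, x2 := g.HasEdgeLabel(0, 1, 10); ok {
		fmt.Println("edge 0-1 with label 10 found at indexes", x1, x2)
	}

	// RemoveEdge deletes both reciprocal arcs and reports the label removed.
	ok, label := g.RemoveEdge(0, 1)
	fmt.Println("removed:", ok, "label:", label)
}
```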
+func (g LabeledUndirected) RemoveEdge(n1, n2 NI) (ok bool, label LI) { + ok, x1, x2 := g.HasEdge(n1, n2) + if !ok { + return + } + a := g.LabeledAdjacencyList + to := a[n1] + label = to[x1].Label // return value + last := len(to) - 1 + to[x1] = to[last] + a[n1] = to[:last] + if n1 == n2 { + return + } + to = a[n2] + last = len(to) - 1 + to[x2] = to[last] + a[n2] = to[:last] + return +} + +// RemoveEdgeLabel removes a single edge between nodes n1 and n2 with label l. +// +// It removes reciprocal arcs in the case of distinct n1 and n2 or removes +// a single arc loop in the case of n1 == n2. +// +// Returns true if the specified edge is found and successfully removed, +// false if the edge does not exist. +func (g LabeledUndirected) RemoveEdgeLabel(n1, n2 NI, l LI) (ok bool) { + ok, x1, x2 := g.HasEdgeLabel(n1, n2, l) + if !ok { + return + } + a := g.LabeledAdjacencyList + to := a[n1] + last := len(to) - 1 + to[x1] = to[last] + a[n1] = to[:last] + if n1 == n2 { + return + } + to = a[n2] + last = len(to) - 1 + to[x2] = to[last] + a[n2] = to[:last] + return +} + // TarjanBiconnectedComponents decomposes a graph into maximal biconnected // components, components for which if any node were removed the component // would remain connected. @@ -262,17 +706,9 @@ func (p *LabeledUndirected) AddEdge(e Edge, l LI) { // // See also the eqivalent unlabeled TarjanBiconnectedComponents. func (g LabeledUndirected) TarjanBiconnectedComponents(emit func([]LabeledEdge) bool) { - // Implemented closely to pseudocode in "Depth-first search and linear - // graph algorithms", Robert Tarjan, SIAM J. Comput. Vol. 1, No. 2, - // June 1972. - // - // Note Tarjan's "adjacency structure" is graph.AdjacencyList, - // His "adjacency list" is an element of a graph.AdjacencyList, also - // termed a "to-list", "neighbor list", or "child list." - // - // Nearly identical code in undir.go. - number := make([]int, len(g.LabeledAdjacencyList)) - lowpt := make([]int, len(g.LabeledAdjacencyList)) + // Code nearly identical to unlabled version. + number := make([]int, g.Order()) + lowpt := make([]int, g.Order()) var stack []LabeledEdge var i int var biconnect func(NI, NI) bool @@ -314,8 +750,68 @@ func (g LabeledUndirected) TarjanBiconnectedComponents(emit func([]LabeledEdge) return true } for w := range g.LabeledAdjacencyList { - if number[w] == 0 && !biconnect(NI(w), 0) { + if number[w] == 0 && !biconnect(NI(w), -1) { return } } } + +func (e *eulerian) pushUndir() error { + for u := e.top(); ; { + e.uv.SetBit(int(u), 0) + arcs := e.g[u] + if len(arcs) == 0 { + return nil + } + w := arcs[0] + e.s++ + e.p[e.s] = w + e.g[u] = arcs[1:] // consume arc + // difference from directed counterpart in dir.go: + // as long as it's not a loop, consume reciprocal arc as well + if w != u { + a2 := e.g[w] + for x, rx := range a2 { + if rx == u { // here it is + last := len(a2) - 1 + a2[x] = a2[last] // someone else gets the seat + e.g[w] = a2[:last] // and it's gone. + goto l + } + } + return fmt.Errorf("graph not undirected. 
%d -> %d reciprocal not found", u, w) + } + l: + u = w + } +} + +func (e *labEulerian) pushUndir() error { + for u := e.top(); ; { + e.uv.SetBit(int(u.To), 0) + arcs := e.g[u.To] + if len(arcs) == 0 { + return nil + } + w := arcs[0] + e.s++ + e.p[e.s] = w + e.g[u.To] = arcs[1:] // consume arc + // difference from directed counterpart in dir.go: + // as long as it's not a loop, consume reciprocal arc as well + if w.To != u.To { + a2 := e.g[w.To] + for x, rx := range a2 { + if rx.To == u.To && rx.Label == w.Label { // here it is + last := len(a2) - 1 + a2[x] = a2[last] // someone else can have the seat + e.g[w.To] = a2[:last] // and it's gone. + goto l + } + } + return fmt.Errorf("graph not undirected. %d -> %v reciprocal not found", u.To, w) + } + l: + u = w + } +} diff --git a/vendor/github.com/soniakeys/graph/undir_RO.go b/vendor/github.com/soniakeys/graph/undir_RO.go index fd8e3778..7707daf1 100644 --- a/vendor/github.com/soniakeys/graph/undir_RO.go +++ b/vendor/github.com/soniakeys/graph/undir_RO.go @@ -3,40 +3,97 @@ package graph +import ( + "errors" + "fmt" + + "github.com/soniakeys/bits" +) + // undir_RO.go is code generated from undir_cg.go by directives in graph.go. // Editing undir_cg.go is okay. It is the code generation source. // DO NOT EDIT undir_RO.go. // The RO means read only and it is upper case RO to slow you down a bit // in case you start to edit the file. +//------------------- + +// Bipartite constructs an object indexing the bipartite structure of a graph. +// +// In a bipartite component, nodes can be partitioned into two sets, or +// "colors," such that every edge in the component goes from one set to the +// other. +// +// If the graph is bipartite, the method constructs and returns a new +// Bipartite object as b and returns ok = true. +// +// If the component is not bipartite, a representative odd cycle as oc and +// returns ok = false. +// +// In the case of a graph with mulitiple connected components, this method +// provides no control over the color orientation by component. See +// Undirected.BipartiteComponent if this control is needed. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) Bipartite() (b *Bipartite, oc []NI, ok bool) { + c1 := bits.New(g.Order()) + c2 := bits.New(g.Order()) + r, _, _ := g.ConnectedComponentReps() + // accumulate n2 number of zero bits in c2 as number of one bits in n1 + var n, n2 int + for _, r := range r { + ok, n, _, oc = g.BipartiteComponent(r, c1, c2) + if !ok { + return + } + n2 += n + } + return &Bipartite{g, c2, n2}, nil, true +} -// Bipartite determines if a connected component of an undirected graph -// is bipartite, a component where nodes can be partitioned into two sets -// such that every edge in the component goes from one set to the other. +// BipartiteComponent analyzes the bipartite structure of a connected component +// of an undirected graph. // -// Argument n can be any representative node of the component. +// In a bipartite component, nodes can be partitioned into two sets, or +// "colors," such that every edge in the component goes from one set to the +// other. // -// If the component is bipartite, Bipartite returns true and a two-coloring -// of the component. Each color set is returned as a bitmap. If the component -// is not bipartite, Bipartite returns false and a representative odd cycle. +// Argument n can be any representative node of the component to be analyzed. 
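A sketch of the whole-graph Bipartite call introduced above. Since the fields of the returned *Bipartite value are not shown in this diff, the example only checks ok and the odd-cycle return; the 4-cycle graph is example data.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// A 4-cycle is bipartite; adding edge 0-2 would create an odd cycle.
	var g graph.Undirected
	g.AddEdge(0, 1)
	g.AddEdge(1, 2)
	g.AddEdge(2, 3)
	g.AddEdge(3, 0)

	if b, oc, ok := g.Bipartite(); ok {
		fmt.Println("bipartite:", b != nil)
	} else {
		fmt.Println("odd cycle:", oc)
	}
}
```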
+// Arguments c1 and c2 must be separate bits.Bits objects constructed to be +// of length of the number of nodes of g. These bitmaps are used in the +// component traversal and the bits of the component must be zero when the +// method is called. +// +// If the component is bipartite, BipartiteComponent populates bitmaps +// c1 and c2 with the two-coloring of the component, always assigning the set +// with representative node n to bitmap c1. It returns b = true, +// and also returns the number of bits set in c1 and c2 as n1 and n2 +// respectively. +// +// If the component is not bipartite, BipartiteComponent returns b = false +// and a representative odd cycle as oc. +// +// See also method Bipartite. // // There are equivalent labeled and unlabeled versions of this method. -func (g Undirected) Bipartite(n NI) (b bool, c1, c2 Bits, oc []NI) { +func (g Undirected) BipartiteComponent(n NI, c1, c2 bits.Bits) (b bool, n1, n2 int, oc []NI) { + a := g.AdjacencyList b = true var open bool - var df func(n NI, c1, c2 *Bits) - df = func(n NI, c1, c2 *Bits) { - c1.SetBit(n, 1) - for _, nb := range g.AdjacencyList[n] { - if c1.Bit(nb) == 1 { + var df func(n NI, c1, c2 *bits.Bits, n1, n2 *int) + df = func(n NI, c1, c2 *bits.Bits, n1, n2 *int) { + c1.SetBit(int(n), 1) + *n1++ + for _, nb := range a[n] { + if c1.Bit(int(nb)) == 1 { b = false oc = []NI{nb, n} open = true return } - if c2.Bit(nb) == 1 { + if c2.Bit(int(nb)) == 1 { continue } - df(nb, c2, c1) + df(nb, c2, c1, n2, n1) if b { continue } @@ -50,11 +107,11 @@ func (g Undirected) Bipartite(n NI) (b bool, c1, c2 Bits, oc []NI) { return } } - df(n, &c1, &c2) + df(n, &c1, &c2, &n1, &n2) if b { - return b, c1, c2, nil + return b, n1, n2, nil } - return b, Bits{}, Bits{}, oc + return b, 0, 0, oc } // BronKerbosch1 finds maximal cliques in an undirected graph. @@ -74,44 +131,49 @@ func (g Undirected) Bipartite(n NI) (b bool, c1, c2 Bits, oc []NI) { // There are equivalent labeled and unlabeled versions of this method. // // See also more sophisticated variants BronKerbosch2 and BronKerbosch3. -func (g Undirected) BronKerbosch1(emit func([]NI) bool) { +func (g Undirected) BronKerbosch1(emit func(bits.Bits) bool) { a := g.AdjacencyList - var f func(R, P, X *Bits) bool - f = func(R, P, X *Bits) bool { + var f func(R, P, X bits.Bits) bool + f = func(R, P, X bits.Bits) bool { switch { - case !P.Zero(): - var r2, p2, x2 Bits - pf := func(n NI) bool { - r2.Set(*R) + case !P.AllZeros(): + r2 := bits.New(len(a)) + p2 := bits.New(len(a)) + x2 := bits.New(len(a)) + pf := func(n int) bool { + r2.Set(R) r2.SetBit(n, 1) - p2.Clear() - x2.Clear() + p2.ClearAll() + x2.ClearAll() for _, to := range a[n] { - if P.Bit(to) == 1 { - p2.SetBit(to, 1) + if P.Bit(int(to)) == 1 { + p2.SetBit(int(to), 1) } - if X.Bit(to) == 1 { - x2.SetBit(to, 1) + if X.Bit(int(to)) == 1 { + x2.SetBit(int(to), 1) } } - if !f(&r2, &p2, &x2) { + if !f(r2, p2, x2) { return false } P.SetBit(n, 0) X.SetBit(n, 1) return true } - if !P.Iterate(pf) { + if !P.IterateOnes(pf) { return false } - case X.Zero(): - return emit(R.Slice()) + case X.AllZeros(): + return emit(R) } return true } - var R, P, X Bits - P.SetAll(len(a)) - f(&R, &P, &X) + var R, P, X bits.Bits + R = bits.New(len(a)) + P = bits.New(len(a)) + X = bits.New(len(a)) + P.SetAll() + f(R, P, X) } // BKPivotMaxDegree is a strategy for BronKerbosch methods. @@ -124,20 +186,20 @@ func (g Undirected) BronKerbosch1(emit func([]NI) bool) { // in P. // // There are equivalent labeled and unlabeled versions of this method. 
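With the migration to bits.Bits, the BronKerbosch emit callbacks now receive a bitmap rather than a []NI slice. Below is a sketch of collecting clique members with IterateOnes, the same iterator the implementation itself uses above; the small graph is invented example data.

```go
package main

import (
	"fmt"

	"github.com/soniakeys/bits"
	"github.com/soniakeys/graph"
)

func main() {
	// A triangle 0-1-2 plus a pendant edge 2-3.
	var g graph.Undirected
	g.AddEdge(0, 1)
	g.AddEdge(1, 2)
	g.AddEdge(2, 0)
	g.AddEdge(2, 3)

	g.BronKerbosch1(func(c bits.Bits) bool {
		// Copy the set bits out as node IDs.
		var clique []graph.NI
		c.IterateOnes(func(n int) bool {
			clique = append(clique, graph.NI(n))
			return true
		})
		fmt.Println("maximal clique:", clique)
		return true // false would stop enumeration
	})
}
```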
-func (g Undirected) BKPivotMaxDegree(P, X *Bits) (p NI) { +func (g Undirected) BKPivotMaxDegree(P, X bits.Bits) (p NI) { // choose pivot u as highest degree node from P or X a := g.AdjacencyList maxDeg := -1 - P.Iterate(func(n NI) bool { // scan P + P.IterateOnes(func(n int) bool { // scan P if d := len(a[n]); d > maxDeg { - p = n + p = NI(n) maxDeg = d } return true }) - X.Iterate(func(n NI) bool { // scan X + X.IterateOnes(func(n int) bool { // scan X if d := len(a[n]); d > maxDeg { - p = n + p = NI(n) maxDeg = d } return true @@ -153,8 +215,8 @@ func (g Undirected) BKPivotMaxDegree(P, X *Bits) (p NI) { // The strategy is to simply pick the first node in P. // // There are equivalent labeled and unlabeled versions of this method. -func (g Undirected) BKPivotMinP(P, X *Bits) NI { - return P.From(0) +func (g Undirected) BKPivotMinP(P, X bits.Bits) NI { + return NI(P.OneFrom(0)) } // BronKerbosch2 finds maximal cliques in an undirected graph. @@ -179,50 +241,55 @@ func (g Undirected) BKPivotMinP(P, X *Bits) NI { // // See also simpler variant BronKerbosch1 and more sophisticated variant // BronKerbosch3. -func (g Undirected) BronKerbosch2(pivot func(P, X *Bits) NI, emit func([]NI) bool) { +func (g Undirected) BronKerbosch2(pivot func(P, X bits.Bits) NI, emit func(bits.Bits) bool) { a := g.AdjacencyList - var f func(R, P, X *Bits) bool - f = func(R, P, X *Bits) bool { + var f func(R, P, X bits.Bits) bool + f = func(R, P, X bits.Bits) bool { switch { - case !P.Zero(): - var r2, p2, x2, pnu Bits + case !P.AllZeros(): + r2 := bits.New(len(a)) + p2 := bits.New(len(a)) + x2 := bits.New(len(a)) + pnu := bits.New(len(a)) // compute P \ N(u). next 5 lines are only difference from BK1 - pnu.Set(*P) + pnu.Set(P) for _, to := range a[pivot(P, X)] { - pnu.SetBit(to, 0) + pnu.SetBit(int(to), 0) } // remaining code like BK1 - pf := func(n NI) bool { - r2.Set(*R) + pf := func(n int) bool { + r2.Set(R) r2.SetBit(n, 1) - p2.Clear() - x2.Clear() + p2.ClearAll() + x2.ClearAll() for _, to := range a[n] { - if P.Bit(to) == 1 { - p2.SetBit(to, 1) + if P.Bit(int(to)) == 1 { + p2.SetBit(int(to), 1) } - if X.Bit(to) == 1 { - x2.SetBit(to, 1) + if X.Bit(int(to)) == 1 { + x2.SetBit(int(to), 1) } } - if !f(&r2, &p2, &x2) { + if !f(r2, p2, x2) { return false } P.SetBit(n, 0) X.SetBit(n, 1) return true } - if !pnu.Iterate(pf) { + if !pnu.IterateOnes(pf) { return false } - case X.Zero(): - return emit(R.Slice()) + case X.AllZeros(): + return emit(R) } return true } - var R, P, X Bits - P.SetAll(len(a)) - f(&R, &P, &X) + R := bits.New(len(a)) + P := bits.New(len(a)) + X := bits.New(len(a)) + P.SetAll() + f(R, P, X) } // BronKerbosch3 finds maximal cliques in an undirected graph. @@ -246,150 +313,196 @@ func (g Undirected) BronKerbosch2(pivot func(P, X *Bits) NI, emit func([]NI) boo // There are equivalent labeled and unlabeled versions of this method. // // See also simpler variants BronKerbosch1 and BronKerbosch2. -func (g Undirected) BronKerbosch3(pivot func(P, X *Bits) NI, emit func([]NI) bool) { +func (g Undirected) BronKerbosch3(pivot func(P, X bits.Bits) NI, emit func(bits.Bits) bool) { a := g.AdjacencyList - var f func(R, P, X *Bits) bool - f = func(R, P, X *Bits) bool { + var f func(R, P, X bits.Bits) bool + f = func(R, P, X bits.Bits) bool { switch { - case !P.Zero(): - var r2, p2, x2, pnu Bits + case !P.AllZeros(): + r2 := bits.New(len(a)) + p2 := bits.New(len(a)) + x2 := bits.New(len(a)) + pnu := bits.New(len(a)) // compute P \ N(u). 
next lines are only difference from BK1 - pnu.Set(*P) + pnu.Set(P) for _, to := range a[pivot(P, X)] { - pnu.SetBit(to, 0) + pnu.SetBit(int(to), 0) } // remaining code like BK2 - pf := func(n NI) bool { - r2.Set(*R) + pf := func(n int) bool { + r2.Set(R) r2.SetBit(n, 1) - p2.Clear() - x2.Clear() + p2.ClearAll() + x2.ClearAll() for _, to := range a[n] { - if P.Bit(to) == 1 { - p2.SetBit(to, 1) + if P.Bit(int(to)) == 1 { + p2.SetBit(int(to), 1) } - if X.Bit(to) == 1 { - x2.SetBit(to, 1) + if X.Bit(int(to)) == 1 { + x2.SetBit(int(to), 1) } } - if !f(&r2, &p2, &x2) { + if !f(r2, p2, x2) { return false } P.SetBit(n, 0) X.SetBit(n, 1) return true } - if !pnu.Iterate(pf) { + if !pnu.IterateOnes(pf) { return false } - case X.Zero(): - return emit(R.Slice()) + case X.AllZeros(): + return emit(R) } return true } - var R, P, X Bits - P.SetAll(len(a)) + R := bits.New(len(a)) + P := bits.New(len(a)) + X := bits.New(len(a)) + P.SetAll() // code above same as BK2 // code below new to BK3 - _, ord, _ := g.Degeneracy() - var p2, x2 Bits + ord, _ := g.DegeneracyOrdering() + p2 := bits.New(len(a)) + x2 := bits.New(len(a)) for _, n := range ord { - R.SetBit(n, 1) - p2.Clear() - x2.Clear() + R.SetBit(int(n), 1) + p2.ClearAll() + x2.ClearAll() for _, to := range a[n] { - if P.Bit(to) == 1 { - p2.SetBit(to, 1) + if P.Bit(int(to)) == 1 { + p2.SetBit(int(to), 1) } - if X.Bit(to) == 1 { - x2.SetBit(to, 1) + if X.Bit(int(to)) == 1 { + x2.SetBit(int(to), 1) } } - if !f(&R, &p2, &x2) { + if !f(R, p2, x2) { return } - R.SetBit(n, 0) - P.SetBit(n, 0) - X.SetBit(n, 1) + R.SetBit(int(n), 0) + P.SetBit(int(n), 0) + X.SetBit(int(n), 1) } } // ConnectedComponentBits returns a function that iterates over connected // components of g, returning a member bitmap for each. // -// Each call of the returned function returns the order (number of nodes) -// and bits of a connected component. The returned function returns zeros -// after returning all connected components. +// Each call of the returned function returns the order, arc size, +// and bits of a connected component. The underlying bits allocation is +// the same for each call and is overwritten on subsequent calls. Use or +// save the bits before calling the function again. The function returns +// zeros after returning all connected components. // // There are equivalent labeled and unlabeled versions of this method. // -// See also ConnectedComponentReps, which has lighter weight return values. -func (g Undirected) ConnectedComponentBits() func() (order int, bits Bits) { +// See also ConnectedComponentInts, ConnectedComponentReps, and +// ConnectedComponentReps. 
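The Bron-Kerbosch variants above now hand cliques to the emit callback as a bits.Bits bitmap rather than a []NI slice. A sketch of adapting a caller, assuming a small hypothetical graph; the bitmap should be read out before the callback returns, since it may be overwritten on later emissions:

```go
package main

import (
	"fmt"

	"github.com/soniakeys/bits"
	"github.com/soniakeys/graph"
)

func main() {
	// Hypothetical graph: triangle 0-1-2 with a pendant edge 2-3.
	g := graph.Undirected{graph.AdjacencyList{
		0: {1, 2},
		1: {0, 2},
		2: {0, 1, 3},
		3: {2},
	}}
	g.BronKerbosch2(g.BKPivotMaxDegree, func(clique bits.Bits) bool {
		// Read the emitted bitmap out immediately rather than retaining it.
		var members []graph.NI
		clique.IterateOnes(func(n int) bool {
			members = append(members, graph.NI(n))
			return true
		})
		fmt.Println("maximal clique:", members)
		return true // true = continue enumerating
	})
}
```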
+func (g Undirected) ConnectedComponentBits() func() (order, arcSize int, bits bits.Bits) { a := g.AdjacencyList - var vg Bits // nodes visited in graph - var vc *Bits // nodes visited in current component - var nc int + vg := bits.New(len(a)) // nodes visited in graph + vc := bits.New(len(a)) // nodes visited in current component + var order, arcSize int var df func(NI) df = func(n NI) { - vg.SetBit(n, 1) - vc.SetBit(n, 1) - nc++ + vg.SetBit(int(n), 1) + vc.SetBit(int(n), 1) + order++ + arcSize += len(a[n]) for _, nb := range a[n] { - if vg.Bit(nb) == 0 { + if vg.Bit(int(nb)) == 0 { df(nb) } } return } - var n NI - return func() (o int, bits Bits) { - for ; n < NI(len(a)); n++ { + var n int + return func() (o, ma int, b bits.Bits) { + for ; n < len(a); n++ { if vg.Bit(n) == 0 { - vc = &bits - nc = 0 - df(n) - return nc, bits + vc.ClearAll() + order, arcSize = 0, 0 + df(NI(n)) + return order, arcSize, vc + } + } + return // return zeros signalling no more components + } +} + +// ConnectedComponenInts returns a list of component numbers (ints) for each +// node of graph g. +// +// The method assigns numbers to components 1-based, 1 through the number of +// components. Return value ci contains the component number for each node. +// Return value nc is the number of components. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentBits, ConnectedComponentLists, and +// ConnectedComponentReps. +func (g Undirected) ConnectedComponentInts() (ci []int, nc int) { + a := g.AdjacencyList + ci = make([]int, len(a)) + var df func(NI) + df = func(nd NI) { + ci[nd] = nc + for _, to := range a[nd] { + if ci[to] == 0 { + df(to) } } return } + for nd := range a { + if ci[nd] == 0 { + nc++ + df(NI(nd)) + } + } + return } // ConnectedComponentLists returns a function that iterates over connected // components of g, returning the member list of each. // // Each call of the returned function returns a node list of a connected -// component. The returned function returns nil after returning all connected -// components. +// component and the arc size of the component. The returned function returns +// nil, 0 after returning all connected components. // // There are equivalent labeled and unlabeled versions of this method. // -// See also ConnectedComponentReps, which has lighter weight return values. -func (g Undirected) ConnectedComponentLists() func() []NI { +// See also ConnectedComponentBits, ConnectedComponentInts, and +// ConnectedComponentReps. +func (g Undirected) ConnectedComponentLists() func() (nodes []NI, arcSize int) { a := g.AdjacencyList - var vg Bits // nodes visited in graph - var m []NI // members of current component + vg := bits.New(len(a)) // nodes visited in graph + var l []NI // accumulated node list of current component + var ma int // accumulated arc size of current component var df func(NI) df = func(n NI) { - vg.SetBit(n, 1) - m = append(m, n) + vg.SetBit(int(n), 1) + l = append(l, n) + ma += len(a[n]) for _, nb := range a[n] { - if vg.Bit(nb) == 0 { + if vg.Bit(int(nb)) == 0 { df(nb) } } return } - var n NI - return func() []NI { - for ; n < NI(len(a)); n++ { + var n int + return func() ([]NI, int) { + for ; n < len(a); n++ { if vg.Bit(n) == 0 { - m = nil - df(n) - return m + l, ma = nil, 0 + df(NI(n)) + return l, ma } } - return nil + return nil, 0 } } @@ -397,8 +510,8 @@ func (g Undirected) ConnectedComponentLists() func() []NI { // component of g. 
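A short sketch of the revised connected-component helpers, using a hypothetical three-component graph; ConnectedComponentInts is new, and the list iterator now also reports arc size (twice the edge count for an undirected component):

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// Hypothetical graph with three components: an edge, a small star, and an
	// isolated node.
	g := graph.Undirected{graph.AdjacencyList{
		0: {1},
		1: {0},
		2: {3, 4},
		3: {2},
		4: {2},
		5: nil,
	}}
	ci, nc := g.ConnectedComponentInts()
	fmt.Println(nc, "components, 1-based membership:", ci)

	// The list iterator returns nil, 0 after the last component.
	next := g.ConnectedComponentLists()
	for nodes, arcSize := next(); nodes != nil; nodes, arcSize = next() {
		fmt.Println("component", nodes, "edges:", arcSize/2)
	}
}
```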
// // Returned is a slice with a single representative node from each connected -// component and also a parallel slice with the order, or number of nodes, -// in the corresponding component. +// component and also parallel slices with the orders and arc sizes +// in the corresponding components. // // This is fairly minimal information describing connected components. // From a representative node, other nodes in the component can be reached @@ -409,27 +522,29 @@ func (g Undirected) ConnectedComponentLists() func() []NI { // See also ConnectedComponentBits and ConnectedComponentLists which can // collect component members in a single traversal, and IsConnected which // is an even simpler boolean test. -func (g Undirected) ConnectedComponentReps() (reps []NI, orders []int) { +func (g Undirected) ConnectedComponentReps() (reps []NI, orders, arcSizes []int) { a := g.AdjacencyList - var c Bits - var o int + c := bits.New(len(a)) + var o, ma int var df func(NI) df = func(n NI) { - c.SetBit(n, 1) + c.SetBit(int(n), 1) o++ + ma += len(a[n]) for _, nb := range a[n] { - if c.Bit(nb) == 0 { + if c.Bit(int(nb)) == 0 { df(nb) } } return } for n := range a { - if c.Bit(NI(n)) == 0 { - reps = append(reps, NI(n)) - o = 0 + if c.Bit(n) == 0 { + o, ma = 0, 0 df(NI(n)) + reps = append(reps, NI(n)) orders = append(orders, o) + arcSizes = append(arcSizes, ma) } } return @@ -444,16 +559,97 @@ func (g Undirected) Copy() (c Undirected, ma int) { return Undirected{l}, s } -// Degeneracy computes k-degeneracy, vertex ordering and k-cores. +// Degeneracy is a measure of dense subgraphs within a graph. // // See Wikipedia https://en.wikipedia.org/wiki/Degeneracy_(graph_theory) // +// See also method DegeneracyOrdering which returns a degeneracy node +// ordering and k-core breaks. +// // There are equivalent labeled and unlabeled versions of this method. -func (g Undirected) Degeneracy() (k int, ordering []NI, cores []int) { +func (g Undirected) Degeneracy() (k int) { + a := g.AdjacencyList + // WP algorithm, attributed to Matula and Beck. + L := bits.New(len(a)) + d := make([]int, len(a)) + var D [][]NI + for v, nb := range a { + dv := len(nb) + d[v] = dv + for len(D) <= dv { + D = append(D, nil) + } + D[dv] = append(D[dv], NI(v)) + } + for range a { + // find a non-empty D + i := 0 + for len(D[i]) == 0 { + i++ + } + // k is max(i, k) + if i > k { + k = i + } + // select from D[i] + Di := D[i] + last := len(Di) - 1 + v := Di[last] + // Add v to ordering, remove from Di + L.SetBit(int(v), 1) + D[i] = Di[:last] + // move neighbors + for _, nb := range a[v] { + if L.Bit(int(nb)) == 1 { + continue + } + dn := d[nb] // old number of neighbors of nb + Ddn := D[dn] // nb is in this list + // remove it from the list + for wx, w := range Ddn { + if w == nb { + last := len(Ddn) - 1 + Ddn[wx], Ddn[last] = Ddn[last], Ddn[wx] + D[dn] = Ddn[:last] + } + } + dn-- // new number of neighbors + d[nb] = dn + // re--add it to it's new list + D[dn] = append(D[dn], nb) + } + } + return +} + +// DegeneracyOrdering computes degeneracy node ordering and k-core breaks. +// +// See Wikipedia https://en.wikipedia.org/wiki/Degeneracy_(graph_theory) +// +// In return value ordering, nodes are ordered by their "coreness" as +// defined at https://en.wikipedia.org/wiki/Degeneracy_(graph_theory)#k-Cores. +// +// Return value kbreaks indexes ordering by coreness number. len(kbreaks) +// will be one more than the graph degeneracy as returned by the Degeneracy +// method. 
If degeneracy is d, d = len(kbreaks) - 1, kbreaks[d] is the last +// value in kbreaks and ordering[:kbreaks[d]] contains nodes of the d-cores +// of the graph. kbreaks[0] is always the number of nodes in g as all nodes +// are in in a 0-core. +// +// Note that definitions of "k-core" differ on whether a k-core must be a +// single connected component. This method does not resolve individual +// connected components. +// +// See also method Degeneracy which returns just the degeneracy number. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) DegeneracyOrdering() (ordering []NI, kbreaks []int) { a := g.AdjacencyList // WP algorithm + k := 0 ordering = make([]NI, len(a)) - var L Bits + kbreaks = []int{len(a)} + L := bits.New(len(a)) d := make([]int, len(a)) var D [][]NI for v, nb := range a { @@ -464,7 +660,7 @@ func (g Undirected) Degeneracy() (k int, ordering []NI, cores []int) { } D[dv] = append(D[dv], NI(v)) } - for ox := range a { + for ox := len(a) - 1; ox >= 0; ox-- { // find a non-empty D i := 0 for len(D[i]) == 0 { @@ -472,10 +668,9 @@ func (g Undirected) Degeneracy() (k int, ordering []NI, cores []int) { } // k is max(i, k) if i > k { - for len(cores) <= i { - cores = append(cores, 0) + for len(kbreaks) <= i { + kbreaks = append(kbreaks, ox+1) } - cores[k] = ox k = i } // select from D[i] @@ -484,11 +679,11 @@ func (g Undirected) Degeneracy() (k int, ordering []NI, cores []int) { v := Di[last] // Add v to ordering, remove from Di ordering[ox] = v - L.SetBit(v, 1) + L.SetBit(int(v), 1) D[i] = Di[:last] // move neighbors for _, nb := range a[v] { - if L.Bit(nb) == 1 { + if L.Bit(int(nb)) == 1 { continue } dn := d[nb] // old number of neighbors of nb @@ -507,7 +702,9 @@ func (g Undirected) Degeneracy() (k int, ordering []NI, cores []int) { D[dn] = append(D[dn], nb) } } - cores[k] = len(ordering) + //for i, j := 0, k; i < j; i, j = i+1, j-1 { + // kbreaks[i], kbreaks[j] = kbreaks[j], kbreaks[i] + //} return } @@ -531,57 +728,291 @@ func (g Undirected) Degree(n NI) int { return d } -// FromList constructs a FromList representing the tree reachable from -// the given root. +// DegreeCentralization returns the degree centralization metric of a graph. +// +// Degree of a node is one measure of node centrality and is directly +// available from the adjacency list representation. This allows degree +// centralization for the graph to be very efficiently computed. +// +// The value returned is from 0 to 1 inclusive for simple graphs of three or +// more nodes. As a special case, 0 is returned for graphs of two or fewer +// nodes. The value returned can be > 1 for graphs with loops or parallel +// edges. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) DegreeCentralization() float64 { + a := g.AdjacencyList + if len(a) <= 2 { + return 0 + } + var max, sum int + for _, to := range a { + if len(to) > max { + max = len(to) + } + sum += len(to) + } + return float64(len(a)*max-sum) / float64((len(a)-1)*(len(a)-2)) +} + +// Density returns density for a simple graph. // -// The connected component containing root should represent a simple graph, -// connected as a tree. +// See also Density function. // -// For nodes connected as a tree, the Path member of the returned FromList -// will be populated with both From and Len values. The MaxLen member will be -// set but Leaves will be left a zero value. Return value cycle will be -1. +// There are equivalent labeled and unlabeled versions of this method. 
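The Degeneracy/DegeneracyOrdering split and the new Density method can be exercised roughly as below; the K4-plus-pendant graph and the expected values in the comments are illustrative, not taken from the package's tests:

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// Hypothetical graph: a K4 on nodes 0-3 with a pendant node 4 attached to 3.
	g := graph.Undirected{graph.AdjacencyList{
		0: {1, 2, 3},
		1: {0, 2, 3},
		2: {0, 1, 3},
		3: {0, 1, 2, 4},
		4: {3},
	}}
	fmt.Println("degeneracy:", g.Degeneracy()) // expected 3 (the K4)
	fmt.Println("density:", g.Density())       // 7 edges over 10 possible = 0.7

	ord, kbreaks := g.DegeneracyOrdering()
	d := len(kbreaks) - 1
	fmt.Println("degeneracy ordering:", ord)
	fmt.Println("k-core breaks:", kbreaks) // kbreaks[0] is always the node count
	// Per the documentation above, ordering[:kbreaks[d]] holds the d-core nodes.
	fmt.Println(d, "-core nodes:", ord[:kbreaks[d]])
}
```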
+func (g Undirected) Density() float64 { + return Density(g.Order(), g.Size()) +} + +// Eulerian scans an undirected graph to determine if it is Eulerian. +// +// If the graph represents an Eulerian cycle, it returns -1, -1, nil. // -// If the connected component containing root is not connected as a tree, -// a cycle will be detected. The returned FromList will be a zero value and -// return value cycle will be a node involved in the cycle. +// If the graph does not represent an Eulerian cycle but does represent an +// Eulerian path, it returns the two end nodes of the path, and nil. // -// Loops and parallel edges will be detected as cycles, however only in the -// connected component containing root. If g is not fully connected, nodes -// not reachable from root will have PathEnd values of {From: -1, Len: 0}. +// Otherwise it returns an error. +// +// See also method EulerianStart, which short-circuits as soon as it finds +// a node that must be a start or end node of an Eulerian path. // // There are equivalent labeled and unlabeled versions of this method. -func (g Undirected) FromList(root NI) (f FromList, cycle NI) { - p := make([]PathEnd, len(g.AdjacencyList)) - for i := range p { - p[i].From = -1 +func (g Undirected) Eulerian() (end1, end2 NI, err error) { + end1 = -1 + end2 = -1 + for n := range g.AdjacencyList { + switch { + case g.Degree(NI(n))%2 == 0: + case end1 < 0: + end1 = NI(n) + case end2 < 0: + end2 = NI(n) + default: + err = errors.New("non-Eulerian") + return + } } - ml := 0 - var df func(NI, NI) bool - df = func(fr, n NI) bool { - l := p[n].Len + 1 - for _, to := range g.AdjacencyList[n] { - if to == fr { - continue - } - if p[to].Len > 0 { - cycle = to - return false - } - p[to] = PathEnd{From: n, Len: l} - if l > ml { - ml = l - } - if !df(n, to) { - return false - } + return +} + +// EulerianCycle finds an Eulerian cycle in an undirected multigraph. +// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the cycle. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// Internally, EulerianCycle copies the entire graph g. +// See EulerianCycleD for a more space efficient version. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) EulerianCycle() ([]NI, error) { + c, _ := g.Copy() + return c.EulerianCycleD(c.Size()) +} + +// EulerianCycleD finds an Eulerian cycle in an undirected multigraph. +// +// EulerianCycleD is destructive on its receiver g. See EulerianCycle for +// a non-destructive version. +// +// Parameter m must be the size of the undirected graph -- the +// number of edges. Use Undirected.Size if the size is unknown. +// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the cycle. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// There are equivalent labeled and unlabeled versions of this method. 
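A sketch of the new Eulerian helpers for undirected graphs, assuming the API as vendored here; the triangle and path graphs are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// A triangle: every degree is even, so an Eulerian cycle exists.
	tri := graph.Undirected{graph.AdjacencyList{
		0: {1, 2},
		1: {0, 2},
		2: {0, 1},
	}}
	if end1, end2, err := tri.Eulerian(); err == nil && end1 < 0 && end2 < 0 {
		cycle, _ := tri.EulerianCycle()
		// First element is the start node; the rest are the nodes visited in order.
		fmt.Println("Eulerian cycle:", cycle)
	}

	// A path 0-1-2: two odd-degree nodes, so only an Eulerian path exists.
	p := graph.Undirected{graph.AdjacencyList{
		0: {1},
		1: {0, 2},
		2: {1},
	}}
	fmt.Println("suggested start:", p.EulerianStart()) // first odd-degree node
	walk, err := p.EulerianPath()
	fmt.Println("Eulerian path:", walk, "err:", err)
}
```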
+func (g Undirected) EulerianCycleD(m int) ([]NI, error) { + if g.Order() == 0 { + return nil, nil + } + e := newEulerian(g.AdjacencyList, m) + e.p[0] = 0 + for e.s >= 0 { + v := e.top() + if err := e.pushUndir(); err != nil { + return nil, err } - return true + if e.top() != v { + return nil, errors.New("not Eulerian") + } + e.keep() } - p[root].Len = 1 - if !df(-1, root) { - return + if !e.uv.AllZeros() { + return nil, errors.New("not strongly connected") } - return FromList{Paths: p, MaxLen: ml}, -1 + return e.p, nil +} + +// EulerianPath finds an Eulerian path in an undirected multigraph. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path, result is an Eulerian path with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the path. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// Internally, EulerianPath copies the entire graph g. +// See EulerianPathD for a more space efficient version. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) EulerianPath() ([]NI, error) { + c, _ := g.Copy() + start := c.EulerianStart() + if start < 0 { + start = 0 + } + return c.EulerianPathD(c.Size(), start) +} + +// EulerianPathD finds an Eulerian path in a undirected multigraph. +// +// EulerianPathD is destructive on its receiver g. See EulerianPath for +// a non-destructive version. +// +// Argument m must be the correct size, or number of edges in g. +// Argument start must be a valid start node for the path. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path starting at start, result is an Eulerian path +// with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the path. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) EulerianPathD(m int, start NI) ([]NI, error) { + if g.Order() == 0 { + return nil, nil + } + e := newEulerian(g.AdjacencyList, m) + e.p[0] = start + // unlike EulerianCycle, the first path doesn't have to be a cycle. + if err := e.pushUndir(); err != nil { + return nil, err + } + e.keep() + for e.s >= 0 { + start = e.top() + e.push() + // paths after the first must be cycles though + // (as long as there are nodes on the stack) + if e.top() != start { + return nil, errors.New("no Eulerian path") + } + e.keep() + } + if !e.uv.AllZeros() { + return nil, errors.New("no Eulerian path") + } + return e.p, nil +} + +// EulerianStart finds a candidate start node for an Eulerian path. +// +// A graph representing an Eulerian path can have two nodes with odd degree. +// If it does, these must be the end nodes of the path. EulerianEnd scans +// for a node with an odd degree, returning immediately with the first one +// it finds. +// +// If the scan completes without finding a node with odd degree the method +// returns -1. +// +// See also method Eulerian, which completely validates a graph as representing +// an Eulerian path. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) EulerianStart() NI { + for n := range g.AdjacencyList { + if g.Degree(NI(n))%2 != 0 { + return NI(n) + } + } + return -1 +} + +// AddNode maps a node in a supergraph to a subgraph node. 
+// +// Argument p must be an NI in supergraph s.Super. AddNode panics if +// p is not a valid node index of s.Super. +// +// AddNode is idempotent in that it does not add a new node to the subgraph if +// a subgraph node already exists mapped to supergraph node p. +// +// The mapped subgraph NI is returned. +func (s *UndirectedSubgraph) AddNode(p NI) (b NI) { + if int(p) < 0 || int(p) >= s.Super.Order() { + panic(fmt.Sprint("AddNode: NI ", p, " not in supergraph")) + } + if b, ok := s.SubNI[p]; ok { + return b + } + a := s.Undirected.AdjacencyList + b = NI(len(a)) + s.Undirected.AdjacencyList = append(a, nil) + s.SuperNI = append(s.SuperNI, p) + s.SubNI[p] = b + return +} + +// InduceList constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. Argument l must be a list of +// NIs in receiver graph g. Receiver g becomes the supergraph of the induced +// subgraph. +// +// Duplicate NIs are allowed in list l. The duplicates are effectively removed +// and only a single corresponding node is created in the subgraph. Subgraph +// NIs are mapped in the order of list l, execpt for ignoring duplicates. +// NIs in l that are not in g will panic. +// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *Undirected) InduceList(l []NI) *UndirectedSubgraph { + sub, sup := mapList(l) + return &UndirectedSubgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + Undirected: Undirected{ + g.AdjacencyList.induceArcs(sub, sup), + }} +} + +// InduceBits constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. Argument t must be a bitmap +// representing NIs in receiver graph g. Receiver g becomes the supergraph +// of the induced subgraph. NIs in t that are not in g will panic. +// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *Undirected) InduceBits(t bits.Bits) *UndirectedSubgraph { + sub, sup := mapBits(t) + return &UndirectedSubgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + Undirected: Undirected{ + g.AdjacencyList.induceArcs(sub, sup), + }} } // IsConnected tests if an undirected graph is a single connected component. @@ -594,19 +1025,18 @@ func (g Undirected) IsConnected() bool { if len(a) == 0 { return true } - var b Bits - b.SetAll(len(a)) + b := bits.New(len(a)) var df func(NI) df = func(n NI) { - b.SetBit(n, 0) + b.SetBit(int(n), 1) for _, to := range a[n] { - if b.Bit(to) == 1 { + if b.Bit(int(to)) == 0 { df(to) } } } df(0) - return b.Zero() + return b.AllOnes() } // IsTree identifies trees in undirected graphs. @@ -618,14 +1048,14 @@ func (g Undirected) IsConnected() bool { // There are equivalent labeled and unlabeled versions of this method. func (g Undirected) IsTree(root NI) (isTree, allTree bool) { a := g.AdjacencyList - var v Bits - v.SetAll(len(a)) + v := bits.New(len(a)) + v.SetAll() var df func(NI, NI) bool df = func(fr, n NI) bool { - if v.Bit(n) == 0 { + if v.Bit(int(n)) == 0 { return false } - v.SetBit(n, 0) + v.SetBit(int(n), 0) for _, to := range a[n] { if to != fr && !df(n, to) { return false @@ -633,18 +1063,18 @@ func (g Undirected) IsTree(root NI) (isTree, allTree bool) { } return true } - v.SetBit(root, 0) + v.SetBit(int(root), 0) for _, to := range a[root] { if !df(root, to) { return false, false } } - return true, v.Zero() + return true, v.AllZeros() } // Size returns the number of edges in g. // -// See also ArcSize and HasLoop. 
+// See also ArcSize and AnyLoop. func (g Undirected) Size() int { m2 := 0 for fr, to := range g.AdjacencyList { @@ -657,3 +1087,52 @@ func (g Undirected) Size() int { } return m2 / 2 } + +// Density returns edge density of a bipartite graph. +// +// Edge density is number of edges over maximum possible number of edges. +// Maximum possible number of edges in a bipartite graph is number of +// nodes of one color times number of nodes of the other color. +func (g Bipartite) Density() float64 { + a := g.Undirected.AdjacencyList + s := 0 + g.Color.IterateOnes(func(n int) bool { + s += len(a[n]) + return true + }) + return float64(s) / float64(g.N0*(len(a)-g.N0)) +} + +// PermuteBiadjacency permutes a bipartite graph in place so that a prefix +// of the adjacency list encodes a biadjacency matrix. +// +// The permutation applied is returned. This would be helpful in referencing +// any externally stored node information. +// +// The biadjacency matrix is encoded as the prefix AdjacencyList[:g.N0]. +// Note though that this slice does not represent a valid complete +// AdjacencyList. BoundsOk would return false, for example. +// +// In adjacency list terms, the result of the permutation is that nodes of +// the prefix only have arcs to the suffix and nodes of the suffix only have +// arcs to the prefix. +func (g Bipartite) PermuteBiadjacency() []int { + p := make([]int, g.Order()) + i := 0 + g.Color.IterateZeros(func(n int) bool { + p[n] = i + i++ + return true + }) + g.Color.IterateOnes(func(n int) bool { + p[n] = i + i++ + return true + }) + g.Permute(p) + g.Color.ClearAll() + for i := g.N0; i < g.Order(); i++ { + g.Color.SetBit(i, 1) + } + return p +} diff --git a/vendor/github.com/soniakeys/graph/undir_cg.go b/vendor/github.com/soniakeys/graph/undir_cg.go index 35b5b97d..ed932241 100644 --- a/vendor/github.com/soniakeys/graph/undir_cg.go +++ b/vendor/github.com/soniakeys/graph/undir_cg.go @@ -3,40 +3,97 @@ package graph +import ( + "errors" + "fmt" + + "github.com/soniakeys/bits" +) + // undir_RO.go is code generated from undir_cg.go by directives in graph.go. // Editing undir_cg.go is okay. It is the code generation source. // DO NOT EDIT undir_RO.go. // The RO means read only and it is upper case RO to slow you down a bit // in case you start to edit the file. +//------------------- + +// Bipartite constructs an object indexing the bipartite structure of a graph. +// +// In a bipartite component, nodes can be partitioned into two sets, or +// "colors," such that every edge in the component goes from one set to the +// other. +// +// If the graph is bipartite, the method constructs and returns a new +// Bipartite object as b and returns ok = true. +// +// If the component is not bipartite, a representative odd cycle as oc and +// returns ok = false. +// +// In the case of a graph with mulitiple connected components, this method +// provides no control over the color orientation by component. See +// Undirected.BipartiteComponent if this control is needed. +// +// There are equivalent labeled and unlabeled versions of this method. 
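The Bipartite type added above gains Density and PermuteBiadjacency. A hedged sketch with a hypothetical K2,3; the node numbering and the expected density in the comment are illustrative:

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// Hypothetical complete bipartite graph K2,3: nodes {0,1} vs {2,3,4}.
	g := graph.Undirected{graph.AdjacencyList{
		0: {2, 3, 4},
		1: {2, 3, 4},
		2: {0, 1},
		3: {0, 1},
		4: {0, 1},
	}}
	b, _, ok := g.Bipartite()
	if !ok {
		fmt.Println("not bipartite")
		return
	}
	fmt.Println("edge density:", b.Density()) // 6 edges / (2 * 3) = 1 for K2,3

	// Reorder nodes so a prefix of the adjacency list encodes the biadjacency
	// matrix; the returned permutation can be used to relabel any external
	// per-node data.
	perm := b.PermuteBiadjacency()
	fmt.Println("permutation applied:", perm)
}
```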
+func (g LabeledUndirected) Bipartite() (b *LabeledBipartite, oc []NI, ok bool) { + c1 := bits.New(g.Order()) + c2 := bits.New(g.Order()) + r, _, _ := g.ConnectedComponentReps() + // accumulate n2 number of zero bits in c2 as number of one bits in n1 + var n, n2 int + for _, r := range r { + ok, n, _, oc = g.BipartiteComponent(r, c1, c2) + if !ok { + return + } + n2 += n + } + return &LabeledBipartite{g, c2, n2}, nil, true +} -// Bipartite determines if a connected component of an undirected graph -// is bipartite, a component where nodes can be partitioned into two sets -// such that every edge in the component goes from one set to the other. +// BipartiteComponent analyzes the bipartite structure of a connected component +// of an undirected graph. // -// Argument n can be any representative node of the component. +// In a bipartite component, nodes can be partitioned into two sets, or +// "colors," such that every edge in the component goes from one set to the +// other. // -// If the component is bipartite, Bipartite returns true and a two-coloring -// of the component. Each color set is returned as a bitmap. If the component -// is not bipartite, Bipartite returns false and a representative odd cycle. +// Argument n can be any representative node of the component to be analyzed. +// Arguments c1 and c2 must be separate bits.Bits objects constructed to be +// of length of the number of nodes of g. These bitmaps are used in the +// component traversal and the bits of the component must be zero when the +// method is called. +// +// If the component is bipartite, BipartiteComponent populates bitmaps +// c1 and c2 with the two-coloring of the component, always assigning the set +// with representative node n to bitmap c1. It returns b = true, +// and also returns the number of bits set in c1 and c2 as n1 and n2 +// respectively. +// +// If the component is not bipartite, BipartiteComponent returns b = false +// and a representative odd cycle as oc. +// +// See also method Bipartite. // // There are equivalent labeled and unlabeled versions of this method. -func (g LabeledUndirected) Bipartite(n NI) (b bool, c1, c2 Bits, oc []NI) { +func (g LabeledUndirected) BipartiteComponent(n NI, c1, c2 bits.Bits) (b bool, n1, n2 int, oc []NI) { + a := g.LabeledAdjacencyList b = true var open bool - var df func(n NI, c1, c2 *Bits) - df = func(n NI, c1, c2 *Bits) { - c1.SetBit(n, 1) - for _, nb := range g.LabeledAdjacencyList[n] { - if c1.Bit(nb.To) == 1 { + var df func(n NI, c1, c2 *bits.Bits, n1, n2 *int) + df = func(n NI, c1, c2 *bits.Bits, n1, n2 *int) { + c1.SetBit(int(n), 1) + *n1++ + for _, nb := range a[n] { + if c1.Bit(int(nb.To)) == 1 { b = false oc = []NI{nb.To, n} open = true return } - if c2.Bit(nb.To) == 1 { + if c2.Bit(int(nb.To)) == 1 { continue } - df(nb.To, c2, c1) + df(nb.To, c2, c1, n2, n1) if b { continue } @@ -50,11 +107,11 @@ func (g LabeledUndirected) Bipartite(n NI) (b bool, c1, c2 Bits, oc []NI) { return } } - df(n, &c1, &c2) + df(n, &c1, &c2, &n1, &n2) if b { - return b, c1, c2, nil + return b, n1, n2, nil } - return b, Bits{}, Bits{}, oc + return b, 0, 0, oc } // BronKerbosch1 finds maximal cliques in an undirected graph. @@ -74,44 +131,49 @@ func (g LabeledUndirected) Bipartite(n NI) (b bool, c1, c2 Bits, oc []NI) { // There are equivalent labeled and unlabeled versions of this method. // // See also more sophisticated variants BronKerbosch2 and BronKerbosch3. 
-func (g LabeledUndirected) BronKerbosch1(emit func([]NI) bool) { +func (g LabeledUndirected) BronKerbosch1(emit func(bits.Bits) bool) { a := g.LabeledAdjacencyList - var f func(R, P, X *Bits) bool - f = func(R, P, X *Bits) bool { + var f func(R, P, X bits.Bits) bool + f = func(R, P, X bits.Bits) bool { switch { - case !P.Zero(): - var r2, p2, x2 Bits - pf := func(n NI) bool { - r2.Set(*R) + case !P.AllZeros(): + r2 := bits.New(len(a)) + p2 := bits.New(len(a)) + x2 := bits.New(len(a)) + pf := func(n int) bool { + r2.Set(R) r2.SetBit(n, 1) - p2.Clear() - x2.Clear() + p2.ClearAll() + x2.ClearAll() for _, to := range a[n] { - if P.Bit(to.To) == 1 { - p2.SetBit(to.To, 1) + if P.Bit(int(to.To)) == 1 { + p2.SetBit(int(to.To), 1) } - if X.Bit(to.To) == 1 { - x2.SetBit(to.To, 1) + if X.Bit(int(to.To)) == 1 { + x2.SetBit(int(to.To), 1) } } - if !f(&r2, &p2, &x2) { + if !f(r2, p2, x2) { return false } P.SetBit(n, 0) X.SetBit(n, 1) return true } - if !P.Iterate(pf) { + if !P.IterateOnes(pf) { return false } - case X.Zero(): - return emit(R.Slice()) + case X.AllZeros(): + return emit(R) } return true } - var R, P, X Bits - P.SetAll(len(a)) - f(&R, &P, &X) + var R, P, X bits.Bits + R = bits.New(len(a)) + P = bits.New(len(a)) + X = bits.New(len(a)) + P.SetAll() + f(R, P, X) } // BKPivotMaxDegree is a strategy for BronKerbosch methods. @@ -124,20 +186,20 @@ func (g LabeledUndirected) BronKerbosch1(emit func([]NI) bool) { // in P. // // There are equivalent labeled and unlabeled versions of this method. -func (g LabeledUndirected) BKPivotMaxDegree(P, X *Bits) (p NI) { +func (g LabeledUndirected) BKPivotMaxDegree(P, X bits.Bits) (p NI) { // choose pivot u as highest degree node from P or X a := g.LabeledAdjacencyList maxDeg := -1 - P.Iterate(func(n NI) bool { // scan P + P.IterateOnes(func(n int) bool { // scan P if d := len(a[n]); d > maxDeg { - p = n + p = NI(n) maxDeg = d } return true }) - X.Iterate(func(n NI) bool { // scan X + X.IterateOnes(func(n int) bool { // scan X if d := len(a[n]); d > maxDeg { - p = n + p = NI(n) maxDeg = d } return true @@ -153,8 +215,8 @@ func (g LabeledUndirected) BKPivotMaxDegree(P, X *Bits) (p NI) { // The strategy is to simply pick the first node in P. // // There are equivalent labeled and unlabeled versions of this method. -func (g LabeledUndirected) BKPivotMinP(P, X *Bits) NI { - return P.From(0) +func (g LabeledUndirected) BKPivotMinP(P, X bits.Bits) NI { + return NI(P.OneFrom(0)) } // BronKerbosch2 finds maximal cliques in an undirected graph. @@ -179,50 +241,55 @@ func (g LabeledUndirected) BKPivotMinP(P, X *Bits) NI { // // See also simpler variant BronKerbosch1 and more sophisticated variant // BronKerbosch3. -func (g LabeledUndirected) BronKerbosch2(pivot func(P, X *Bits) NI, emit func([]NI) bool) { +func (g LabeledUndirected) BronKerbosch2(pivot func(P, X bits.Bits) NI, emit func(bits.Bits) bool) { a := g.LabeledAdjacencyList - var f func(R, P, X *Bits) bool - f = func(R, P, X *Bits) bool { + var f func(R, P, X bits.Bits) bool + f = func(R, P, X bits.Bits) bool { switch { - case !P.Zero(): - var r2, p2, x2, pnu Bits + case !P.AllZeros(): + r2 := bits.New(len(a)) + p2 := bits.New(len(a)) + x2 := bits.New(len(a)) + pnu := bits.New(len(a)) // compute P \ N(u). 
next 5 lines are only difference from BK1 - pnu.Set(*P) + pnu.Set(P) for _, to := range a[pivot(P, X)] { - pnu.SetBit(to.To, 0) + pnu.SetBit(int(to.To), 0) } // remaining code like BK1 - pf := func(n NI) bool { - r2.Set(*R) + pf := func(n int) bool { + r2.Set(R) r2.SetBit(n, 1) - p2.Clear() - x2.Clear() + p2.ClearAll() + x2.ClearAll() for _, to := range a[n] { - if P.Bit(to.To) == 1 { - p2.SetBit(to.To, 1) + if P.Bit(int(to.To)) == 1 { + p2.SetBit(int(to.To), 1) } - if X.Bit(to.To) == 1 { - x2.SetBit(to.To, 1) + if X.Bit(int(to.To)) == 1 { + x2.SetBit(int(to.To), 1) } } - if !f(&r2, &p2, &x2) { + if !f(r2, p2, x2) { return false } P.SetBit(n, 0) X.SetBit(n, 1) return true } - if !pnu.Iterate(pf) { + if !pnu.IterateOnes(pf) { return false } - case X.Zero(): - return emit(R.Slice()) + case X.AllZeros(): + return emit(R) } return true } - var R, P, X Bits - P.SetAll(len(a)) - f(&R, &P, &X) + R := bits.New(len(a)) + P := bits.New(len(a)) + X := bits.New(len(a)) + P.SetAll() + f(R, P, X) } // BronKerbosch3 finds maximal cliques in an undirected graph. @@ -246,150 +313,196 @@ func (g LabeledUndirected) BronKerbosch2(pivot func(P, X *Bits) NI, emit func([] // There are equivalent labeled and unlabeled versions of this method. // // See also simpler variants BronKerbosch1 and BronKerbosch2. -func (g LabeledUndirected) BronKerbosch3(pivot func(P, X *Bits) NI, emit func([]NI) bool) { +func (g LabeledUndirected) BronKerbosch3(pivot func(P, X bits.Bits) NI, emit func(bits.Bits) bool) { a := g.LabeledAdjacencyList - var f func(R, P, X *Bits) bool - f = func(R, P, X *Bits) bool { + var f func(R, P, X bits.Bits) bool + f = func(R, P, X bits.Bits) bool { switch { - case !P.Zero(): - var r2, p2, x2, pnu Bits + case !P.AllZeros(): + r2 := bits.New(len(a)) + p2 := bits.New(len(a)) + x2 := bits.New(len(a)) + pnu := bits.New(len(a)) // compute P \ N(u). 
next lines are only difference from BK1 - pnu.Set(*P) + pnu.Set(P) for _, to := range a[pivot(P, X)] { - pnu.SetBit(to.To, 0) + pnu.SetBit(int(to.To), 0) } // remaining code like BK2 - pf := func(n NI) bool { - r2.Set(*R) + pf := func(n int) bool { + r2.Set(R) r2.SetBit(n, 1) - p2.Clear() - x2.Clear() + p2.ClearAll() + x2.ClearAll() for _, to := range a[n] { - if P.Bit(to.To) == 1 { - p2.SetBit(to.To, 1) + if P.Bit(int(to.To)) == 1 { + p2.SetBit(int(to.To), 1) } - if X.Bit(to.To) == 1 { - x2.SetBit(to.To, 1) + if X.Bit(int(to.To)) == 1 { + x2.SetBit(int(to.To), 1) } } - if !f(&r2, &p2, &x2) { + if !f(r2, p2, x2) { return false } P.SetBit(n, 0) X.SetBit(n, 1) return true } - if !pnu.Iterate(pf) { + if !pnu.IterateOnes(pf) { return false } - case X.Zero(): - return emit(R.Slice()) + case X.AllZeros(): + return emit(R) } return true } - var R, P, X Bits - P.SetAll(len(a)) + R := bits.New(len(a)) + P := bits.New(len(a)) + X := bits.New(len(a)) + P.SetAll() // code above same as BK2 // code below new to BK3 - _, ord, _ := g.Degeneracy() - var p2, x2 Bits + ord, _ := g.DegeneracyOrdering() + p2 := bits.New(len(a)) + x2 := bits.New(len(a)) for _, n := range ord { - R.SetBit(n, 1) - p2.Clear() - x2.Clear() + R.SetBit(int(n), 1) + p2.ClearAll() + x2.ClearAll() for _, to := range a[n] { - if P.Bit(to.To) == 1 { - p2.SetBit(to.To, 1) + if P.Bit(int(to.To)) == 1 { + p2.SetBit(int(to.To), 1) } - if X.Bit(to.To) == 1 { - x2.SetBit(to.To, 1) + if X.Bit(int(to.To)) == 1 { + x2.SetBit(int(to.To), 1) } } - if !f(&R, &p2, &x2) { + if !f(R, p2, x2) { return } - R.SetBit(n, 0) - P.SetBit(n, 0) - X.SetBit(n, 1) + R.SetBit(int(n), 0) + P.SetBit(int(n), 0) + X.SetBit(int(n), 1) } } // ConnectedComponentBits returns a function that iterates over connected // components of g, returning a member bitmap for each. // -// Each call of the returned function returns the order (number of nodes) -// and bits of a connected component. The returned function returns zeros -// after returning all connected components. +// Each call of the returned function returns the order, arc size, +// and bits of a connected component. The underlying bits allocation is +// the same for each call and is overwritten on subsequent calls. Use or +// save the bits before calling the function again. The function returns +// zeros after returning all connected components. // // There are equivalent labeled and unlabeled versions of this method. // -// See also ConnectedComponentReps, which has lighter weight return values. -func (g LabeledUndirected) ConnectedComponentBits() func() (order int, bits Bits) { +// See also ConnectedComponentInts, ConnectedComponentReps, and +// ConnectedComponentReps. 
+func (g LabeledUndirected) ConnectedComponentBits() func() (order, arcSize int, bits bits.Bits) { a := g.LabeledAdjacencyList - var vg Bits // nodes visited in graph - var vc *Bits // nodes visited in current component - var nc int + vg := bits.New(len(a)) // nodes visited in graph + vc := bits.New(len(a)) // nodes visited in current component + var order, arcSize int var df func(NI) df = func(n NI) { - vg.SetBit(n, 1) - vc.SetBit(n, 1) - nc++ + vg.SetBit(int(n), 1) + vc.SetBit(int(n), 1) + order++ + arcSize += len(a[n]) for _, nb := range a[n] { - if vg.Bit(nb.To) == 0 { + if vg.Bit(int(nb.To)) == 0 { df(nb.To) } } return } - var n NI - return func() (o int, bits Bits) { - for ; n < NI(len(a)); n++ { + var n int + return func() (o, ma int, b bits.Bits) { + for ; n < len(a); n++ { if vg.Bit(n) == 0 { - vc = &bits - nc = 0 - df(n) - return nc, bits + vc.ClearAll() + order, arcSize = 0, 0 + df(NI(n)) + return order, arcSize, vc + } + } + return // return zeros signalling no more components + } +} + +// ConnectedComponenInts returns a list of component numbers (ints) for each +// node of graph g. +// +// The method assigns numbers to components 1-based, 1 through the number of +// components. Return value ci contains the component number for each node. +// Return value nc is the number of components. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentBits, ConnectedComponentLists, and +// ConnectedComponentReps. +func (g LabeledUndirected) ConnectedComponentInts() (ci []int, nc int) { + a := g.LabeledAdjacencyList + ci = make([]int, len(a)) + var df func(NI) + df = func(nd NI) { + ci[nd] = nc + for _, to := range a[nd] { + if ci[to.To] == 0 { + df(to.To) } } return } + for nd := range a { + if ci[nd] == 0 { + nc++ + df(NI(nd)) + } + } + return } // ConnectedComponentLists returns a function that iterates over connected // components of g, returning the member list of each. // // Each call of the returned function returns a node list of a connected -// component. The returned function returns nil after returning all connected -// components. +// component and the arc size of the component. The returned function returns +// nil, 0 after returning all connected components. // // There are equivalent labeled and unlabeled versions of this method. // -// See also ConnectedComponentReps, which has lighter weight return values. -func (g LabeledUndirected) ConnectedComponentLists() func() []NI { +// See also ConnectedComponentBits, ConnectedComponentInts, and +// ConnectedComponentReps. 
+func (g LabeledUndirected) ConnectedComponentLists() func() (nodes []NI, arcSize int) { a := g.LabeledAdjacencyList - var vg Bits // nodes visited in graph - var m []NI // members of current component + vg := bits.New(len(a)) // nodes visited in graph + var l []NI // accumulated node list of current component + var ma int // accumulated arc size of current component var df func(NI) df = func(n NI) { - vg.SetBit(n, 1) - m = append(m, n) + vg.SetBit(int(n), 1) + l = append(l, n) + ma += len(a[n]) for _, nb := range a[n] { - if vg.Bit(nb.To) == 0 { + if vg.Bit(int(nb.To)) == 0 { df(nb.To) } } return } - var n NI - return func() []NI { - for ; n < NI(len(a)); n++ { + var n int + return func() ([]NI, int) { + for ; n < len(a); n++ { if vg.Bit(n) == 0 { - m = nil - df(n) - return m + l, ma = nil, 0 + df(NI(n)) + return l, ma } } - return nil + return nil, 0 } } @@ -397,8 +510,8 @@ func (g LabeledUndirected) ConnectedComponentLists() func() []NI { // component of g. // // Returned is a slice with a single representative node from each connected -// component and also a parallel slice with the order, or number of nodes, -// in the corresponding component. +// component and also parallel slices with the orders and arc sizes +// in the corresponding components. // // This is fairly minimal information describing connected components. // From a representative node, other nodes in the component can be reached @@ -409,27 +522,29 @@ func (g LabeledUndirected) ConnectedComponentLists() func() []NI { // See also ConnectedComponentBits and ConnectedComponentLists which can // collect component members in a single traversal, and IsConnected which // is an even simpler boolean test. -func (g LabeledUndirected) ConnectedComponentReps() (reps []NI, orders []int) { +func (g LabeledUndirected) ConnectedComponentReps() (reps []NI, orders, arcSizes []int) { a := g.LabeledAdjacencyList - var c Bits - var o int + c := bits.New(len(a)) + var o, ma int var df func(NI) df = func(n NI) { - c.SetBit(n, 1) + c.SetBit(int(n), 1) o++ + ma += len(a[n]) for _, nb := range a[n] { - if c.Bit(nb.To) == 0 { + if c.Bit(int(nb.To)) == 0 { df(nb.To) } } return } for n := range a { - if c.Bit(NI(n)) == 0 { - reps = append(reps, NI(n)) - o = 0 + if c.Bit(n) == 0 { + o, ma = 0, 0 df(NI(n)) + reps = append(reps, NI(n)) orders = append(orders, o) + arcSizes = append(arcSizes, ma) } } return @@ -444,16 +559,97 @@ func (g LabeledUndirected) Copy() (c LabeledUndirected, ma int) { return LabeledUndirected{l}, s } -// Degeneracy computes k-degeneracy, vertex ordering and k-cores. +// Degeneracy is a measure of dense subgraphs within a graph. // // See Wikipedia https://en.wikipedia.org/wiki/Degeneracy_(graph_theory) // +// See also method DegeneracyOrdering which returns a degeneracy node +// ordering and k-core breaks. +// // There are equivalent labeled and unlabeled versions of this method. -func (g LabeledUndirected) Degeneracy() (k int, ordering []NI, cores []int) { +func (g LabeledUndirected) Degeneracy() (k int) { + a := g.LabeledAdjacencyList + // WP algorithm, attributed to Matula and Beck. 
+ L := bits.New(len(a)) + d := make([]int, len(a)) + var D [][]NI + for v, nb := range a { + dv := len(nb) + d[v] = dv + for len(D) <= dv { + D = append(D, nil) + } + D[dv] = append(D[dv], NI(v)) + } + for range a { + // find a non-empty D + i := 0 + for len(D[i]) == 0 { + i++ + } + // k is max(i, k) + if i > k { + k = i + } + // select from D[i] + Di := D[i] + last := len(Di) - 1 + v := Di[last] + // Add v to ordering, remove from Di + L.SetBit(int(v), 1) + D[i] = Di[:last] + // move neighbors + for _, nb := range a[v] { + if L.Bit(int(nb.To)) == 1 { + continue + } + dn := d[nb.To] // old number of neighbors of nb + Ddn := D[dn] // nb is in this list + // remove it from the list + for wx, w := range Ddn { + if w == nb.To { + last := len(Ddn) - 1 + Ddn[wx], Ddn[last] = Ddn[last], Ddn[wx] + D[dn] = Ddn[:last] + } + } + dn-- // new number of neighbors + d[nb.To] = dn + // re--add it to it's new list + D[dn] = append(D[dn], nb.To) + } + } + return +} + +// DegeneracyOrdering computes degeneracy node ordering and k-core breaks. +// +// See Wikipedia https://en.wikipedia.org/wiki/Degeneracy_(graph_theory) +// +// In return value ordering, nodes are ordered by their "coreness" as +// defined at https://en.wikipedia.org/wiki/Degeneracy_(graph_theory)#k-Cores. +// +// Return value kbreaks indexes ordering by coreness number. len(kbreaks) +// will be one more than the graph degeneracy as returned by the Degeneracy +// method. If degeneracy is d, d = len(kbreaks) - 1, kbreaks[d] is the last +// value in kbreaks and ordering[:kbreaks[d]] contains nodes of the d-cores +// of the graph. kbreaks[0] is always the number of nodes in g as all nodes +// are in in a 0-core. +// +// Note that definitions of "k-core" differ on whether a k-core must be a +// single connected component. This method does not resolve individual +// connected components. +// +// See also method Degeneracy which returns just the degeneracy number. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g LabeledUndirected) DegeneracyOrdering() (ordering []NI, kbreaks []int) { a := g.LabeledAdjacencyList // WP algorithm + k := 0 ordering = make([]NI, len(a)) - var L Bits + kbreaks = []int{len(a)} + L := bits.New(len(a)) d := make([]int, len(a)) var D [][]NI for v, nb := range a { @@ -464,7 +660,7 @@ func (g LabeledUndirected) Degeneracy() (k int, ordering []NI, cores []int) { } D[dv] = append(D[dv], NI(v)) } - for ox := range a { + for ox := len(a) - 1; ox >= 0; ox-- { // find a non-empty D i := 0 for len(D[i]) == 0 { @@ -472,10 +668,9 @@ func (g LabeledUndirected) Degeneracy() (k int, ordering []NI, cores []int) { } // k is max(i, k) if i > k { - for len(cores) <= i { - cores = append(cores, 0) + for len(kbreaks) <= i { + kbreaks = append(kbreaks, ox+1) } - cores[k] = ox k = i } // select from D[i] @@ -484,11 +679,11 @@ func (g LabeledUndirected) Degeneracy() (k int, ordering []NI, cores []int) { v := Di[last] // Add v to ordering, remove from Di ordering[ox] = v - L.SetBit(v, 1) + L.SetBit(int(v), 1) D[i] = Di[:last] // move neighbors for _, nb := range a[v] { - if L.Bit(nb.To) == 1 { + if L.Bit(int(nb.To)) == 1 { continue } dn := d[nb.To] // old number of neighbors of nb @@ -507,7 +702,9 @@ func (g LabeledUndirected) Degeneracy() (k int, ordering []NI, cores []int) { D[dn] = append(D[dn], nb.To) } } - cores[k] = len(ordering) + //for i, j := 0, k; i < j; i, j = i+1, j-1 { + // kbreaks[i], kbreaks[j] = kbreaks[j], kbreaks[i] + //} return } @@ -531,57 +728,291 @@ func (g LabeledUndirected) Degree(n NI) int { return d } -// FromList constructs a FromList representing the tree reachable from -// the given root. +// DegreeCentralization returns the degree centralization metric of a graph. +// +// Degree of a node is one measure of node centrality and is directly +// available from the adjacency list representation. This allows degree +// centralization for the graph to be very efficiently computed. +// +// The value returned is from 0 to 1 inclusive for simple graphs of three or +// more nodes. As a special case, 0 is returned for graphs of two or fewer +// nodes. The value returned can be > 1 for graphs with loops or parallel +// edges. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) DegreeCentralization() float64 { + a := g.LabeledAdjacencyList + if len(a) <= 2 { + return 0 + } + var max, sum int + for _, to := range a { + if len(to) > max { + max = len(to) + } + sum += len(to) + } + return float64(len(a)*max-sum) / float64((len(a)-1)*(len(a)-2)) +} + +// Density returns density for a simple graph. // -// The connected component containing root should represent a simple graph, -// connected as a tree. +// See also Density function. // -// For nodes connected as a tree, the Path member of the returned FromList -// will be populated with both From and Len values. The MaxLen member will be -// set but Leaves will be left a zero value. Return value cycle will be -1. +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) Density() float64 { + return Density(g.Order(), g.Size()) +} + +// Eulerian scans an undirected graph to determine if it is Eulerian. +// +// If the graph represents an Eulerian cycle, it returns -1, -1, nil. // -// If the connected component containing root is not connected as a tree, -// a cycle will be detected. The returned FromList will be a zero value and -// return value cycle will be a node involved in the cycle. 
+// If the graph does not represent an Eulerian cycle but does represent an +// Eulerian path, it returns the two end nodes of the path, and nil. // -// Loops and parallel edges will be detected as cycles, however only in the -// connected component containing root. If g is not fully connected, nodes -// not reachable from root will have PathEnd values of {From: -1, Len: 0}. +// Otherwise it returns an error. +// +// See also method EulerianStart, which short-circuits as soon as it finds +// a node that must be a start or end node of an Eulerian path. // // There are equivalent labeled and unlabeled versions of this method. -func (g LabeledUndirected) FromList(root NI) (f FromList, cycle NI) { - p := make([]PathEnd, len(g.LabeledAdjacencyList)) - for i := range p { - p[i].From = -1 +func (g LabeledUndirected) Eulerian() (end1, end2 NI, err error) { + end1 = -1 + end2 = -1 + for n := range g.LabeledAdjacencyList { + switch { + case g.Degree(NI(n))%2 == 0: + case end1 < 0: + end1 = NI(n) + case end2 < 0: + end2 = NI(n) + default: + err = errors.New("non-Eulerian") + return + } } - ml := 0 - var df func(NI, NI) bool - df = func(fr, n NI) bool { - l := p[n].Len + 1 - for _, to := range g.LabeledAdjacencyList[n] { - if to.To == fr { - continue - } - if p[to.To].Len > 0 { - cycle = to.To - return false - } - p[to.To] = PathEnd{From: n, Len: l} - if l > ml { - ml = l - } - if !df(n, to.To) { - return false - } + return +} + +// EulerianCycle finds an Eulerian cycle in an undirected multigraph. +// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the cycle. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// Internally, EulerianCycle copies the entire graph g. +// See EulerianCycleD for a more space efficient version. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) EulerianCycle() ([]Half, error) { + c, _ := g.Copy() + return c.EulerianCycleD(c.Size()) +} + +// EulerianCycleD finds an Eulerian cycle in an undirected multigraph. +// +// EulerianCycleD is destructive on its receiver g. See EulerianCycle for +// a non-destructive version. +// +// Parameter m must be the size of the undirected graph -- the +// number of edges. Use Undirected.Size if the size is unknown. +// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the cycle. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// There are equivalent labeled and unlabeled versions of this method. 
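The labeled Eulerian methods return []Half, so each step carries the edge label as well as the destination node. A sketch with hypothetical labels 10, 11, 12; reciprocal arcs must carry the same label for an undirected edge to be matched and consumed:

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// Hypothetical labeled triangle; each undirected edge is a pair of
	// reciprocal arcs carrying the same label.
	g := graph.LabeledUndirected{graph.LabeledAdjacencyList{
		0: {{To: 1, Label: 10}, {To: 2, Label: 12}},
		1: {{To: 0, Label: 10}, {To: 2, Label: 11}},
		2: {{To: 1, Label: 11}, {To: 0, Label: 12}},
	}}
	cycle, err := g.EulerianCycle()
	if err != nil {
		fmt.Println(err)
		return
	}
	// cycle[0].To is the start node; its Label is a -1 placeholder.
	// The remaining elements are the half arcs traversed in order.
	for _, h := range cycle[1:] {
		fmt.Println("edge", h.Label, "-> node", h.To)
	}
}
```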
+func (g LabeledUndirected) EulerianCycleD(m int) ([]Half, error) { + if g.Order() == 0 { + return nil, nil + } + e := newLabEulerian(g.LabeledAdjacencyList, m) + e.p[0] = Half{0, -1} + for e.s >= 0 { + v := e.top() + if err := e.pushUndir(); err != nil { + return nil, err } - return true + if e.top().To != v.To { + return nil, errors.New("not Eulerian") + } + e.keep() } - p[root].Len = 1 - if !df(-1, root) { - return + if !e.uv.AllZeros() { + return nil, errors.New("not strongly connected") } - return FromList{Paths: p, MaxLen: ml}, -1 + return e.p, nil +} + +// EulerianPath finds an Eulerian path in an undirected multigraph. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path, result is an Eulerian path with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the path. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// Internally, EulerianPath copies the entire graph g. +// See EulerianPathD for a more space efficient version. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) EulerianPath() ([]Half, error) { + c, _ := g.Copy() + start := c.EulerianStart() + if start < 0 { + start = 0 + } + return c.EulerianPathD(c.Size(), start) +} + +// EulerianPathD finds an Eulerian path in a undirected multigraph. +// +// EulerianPathD is destructive on its receiver g. See EulerianPath for +// a non-destructive version. +// +// Argument m must be the correct size, or number of edges in g. +// Argument start must be a valid start node for the path. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path starting at start, result is an Eulerian path +// with err = nil. +// The first element of the result represents only a start node. +// The remaining elements represent the half arcs of the path. +// +// * Otherwise, result is nil, with a non-nil error giving a reason the graph +// is not Eulerian. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) EulerianPathD(m int, start NI) ([]Half, error) { + if g.Order() == 0 { + return nil, nil + } + e := newLabEulerian(g.LabeledAdjacencyList, m) + e.p[0] = Half{start, -1} + // unlike EulerianCycle, the first path doesn't have to be a cycle. + if err := e.pushUndir(); err != nil { + return nil, err + } + e.keep() + for e.s >= 0 { + start = e.top().To + e.push() + // paths after the first must be cycles though + // (as long as there are nodes on the stack) + if e.top().To != start { + return nil, errors.New("no Eulerian path") + } + e.keep() + } + if !e.uv.AllZeros() { + return nil, errors.New("no Eulerian path") + } + return e.p, nil +} + +// EulerianStart finds a candidate start node for an Eulerian path. +// +// A graph representing an Eulerian path can have two nodes with odd degree. +// If it does, these must be the end nodes of the path. EulerianEnd scans +// for a node with an odd degree, returning immediately with the first one +// it finds. +// +// If the scan completes without finding a node with odd degree the method +// returns -1. +// +// See also method Eulerian, which completely validates a graph as representing +// an Eulerian path. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g LabeledUndirected) EulerianStart() NI { + for n := range g.LabeledAdjacencyList { + if g.Degree(NI(n))%2 != 0 { + return NI(n) + } + } + return -1 +} + +// AddNode maps a node in a supergraph to a subgraph node. +// +// Argument p must be an NI in supergraph s.Super. AddNode panics if +// p is not a valid node index of s.Super. +// +// AddNode is idempotent in that it does not add a new node to the subgraph if +// a subgraph node already exists mapped to supergraph node p. +// +// The mapped subgraph NI is returned. +func (s *LabeledUndirectedSubgraph) AddNode(p NI) (b NI) { + if int(p) < 0 || int(p) >= s.Super.Order() { + panic(fmt.Sprint("AddNode: NI ", p, " not in supergraph")) + } + if b, ok := s.SubNI[p]; ok { + return b + } + a := s.LabeledUndirected.LabeledAdjacencyList + b = NI(len(a)) + s.LabeledUndirected.LabeledAdjacencyList = append(a, nil) + s.SuperNI = append(s.SuperNI, p) + s.SubNI[p] = b + return +} + +// InduceList constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. Argument l must be a list of +// NIs in receiver graph g. Receiver g becomes the supergraph of the induced +// subgraph. +// +// Duplicate NIs are allowed in list l. The duplicates are effectively removed +// and only a single corresponding node is created in the subgraph. Subgraph +// NIs are mapped in the order of list l, execpt for ignoring duplicates. +// NIs in l that are not in g will panic. +// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *LabeledUndirected) InduceList(l []NI) *LabeledUndirectedSubgraph { + sub, sup := mapList(l) + return &LabeledUndirectedSubgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + LabeledUndirected: LabeledUndirected{ + g.LabeledAdjacencyList.induceArcs(sub, sup), + }} +} + +// InduceBits constructs a node-induced subgraph. +// +// The subgraph is induced on receiver graph g. Argument t must be a bitmap +// representing NIs in receiver graph g. Receiver g becomes the supergraph +// of the induced subgraph. NIs in t that are not in g will panic. +// +// Returned is the constructed Subgraph object containing the induced subgraph +// and the mappings to the supergraph. +func (g *LabeledUndirected) InduceBits(t bits.Bits) *LabeledUndirectedSubgraph { + sub, sup := mapBits(t) + return &LabeledUndirectedSubgraph{ + Super: g, + SubNI: sub, + SuperNI: sup, + LabeledUndirected: LabeledUndirected{ + g.LabeledAdjacencyList.induceArcs(sub, sup), + }} } // IsConnected tests if an undirected graph is a single connected component. @@ -594,19 +1025,18 @@ func (g LabeledUndirected) IsConnected() bool { if len(a) == 0 { return true } - var b Bits - b.SetAll(len(a)) + b := bits.New(len(a)) var df func(NI) df = func(n NI) { - b.SetBit(n, 0) + b.SetBit(int(n), 1) for _, to := range a[n] { - if b.Bit(to.To) == 1 { + if b.Bit(int(to.To)) == 0 { df(to.To) } } } df(0) - return b.Zero() + return b.AllOnes() } // IsTree identifies trees in undirected graphs. @@ -618,14 +1048,14 @@ func (g LabeledUndirected) IsConnected() bool { // There are equivalent labeled and unlabeled versions of this method. 
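AddNode, InduceList and InduceBits above build node-induced subgraphs that keep mappings back to the supergraph. A rough sketch of the list-induced form, using the exported SuperNI mapping and AddNode shown above:

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	g := graph.LabeledUndirected{LabeledAdjacencyList: graph.LabeledAdjacencyList{
		0: {{1, 'a'}},
		1: {{0, 'a'}, {2, 'b'}},
		2: {{1, 'b'}},
	}}
	// Induce the subgraph on nodes 0 and 2 (duplicates in the list are ignored).
	sub := g.InduceList([]graph.NI{0, 2, 0})
	fmt.Println(sub.SuperNI) // supergraph NIs of the subgraph nodes, e.g. [0 2]
	// AddNode idempotently maps a further supergraph node into the subgraph
	// and returns its subgraph NI.
	fmt.Println(sub.AddNode(1)) // 2, the newly created subgraph node
}
```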
func (g LabeledUndirected) IsTree(root NI) (isTree, allTree bool) { a := g.LabeledAdjacencyList - var v Bits - v.SetAll(len(a)) + v := bits.New(len(a)) + v.SetAll() var df func(NI, NI) bool df = func(fr, n NI) bool { - if v.Bit(n) == 0 { + if v.Bit(int(n)) == 0 { return false } - v.SetBit(n, 0) + v.SetBit(int(n), 0) for _, to := range a[n] { if to.To != fr && !df(n, to.To) { return false @@ -633,18 +1063,18 @@ func (g LabeledUndirected) IsTree(root NI) (isTree, allTree bool) { } return true } - v.SetBit(root, 0) + v.SetBit(int(root), 0) for _, to := range a[root] { if !df(root, to.To) { return false, false } } - return true, v.Zero() + return true, v.AllZeros() } // Size returns the number of edges in g. // -// See also ArcSize and HasLoop. +// See also ArcSize and AnyLoop. func (g LabeledUndirected) Size() int { m2 := 0 for fr, to := range g.LabeledAdjacencyList { @@ -657,3 +1087,52 @@ func (g LabeledUndirected) Size() int { } return m2 / 2 } + +// Density returns edge density of a bipartite graph. +// +// Edge density is number of edges over maximum possible number of edges. +// Maximum possible number of edges in a bipartite graph is number of +// nodes of one color times number of nodes of the other color. +func (g LabeledBipartite) Density() float64 { + a := g.LabeledUndirected.LabeledAdjacencyList + s := 0 + g.Color.IterateOnes(func(n int) bool { + s += len(a[n]) + return true + }) + return float64(s) / float64(g.N0*(len(a)-g.N0)) +} + +// PermuteBiadjacency permutes a bipartite graph in place so that a prefix +// of the adjacency list encodes a biadjacency matrix. +// +// The permutation applied is returned. This would be helpful in referencing +// any externally stored node information. +// +// The biadjacency matrix is encoded as the prefix AdjacencyList[:g.N0]. +// Note though that this slice does not represent a valid complete +// AdjacencyList. BoundsOk would return false, for example. +// +// In adjacency list terms, the result of the permutation is that nodes of +// the prefix only have arcs to the suffix and nodes of the suffix only have +// arcs to the prefix. +func (g LabeledBipartite) PermuteBiadjacency() []int { + p := make([]int, g.Order()) + i := 0 + g.Color.IterateZeros(func(n int) bool { + p[n] = i + i++ + return true + }) + g.Color.IterateOnes(func(n int) bool { + p[n] = i + i++ + return true + }) + g.Permute(p) + g.Color.ClearAll() + for i := g.N0; i < g.Order(); i++ { + g.Color.SetBit(i, 1) + } + return p +} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. 
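IsConnected, IsTree and Size above are migrated to the new soniakeys/bits API in this revision; their signatures are unchanged. A small sketch on a deliberately disconnected graph, under the same assumptions as the earlier sketches:

```go
package main

import (
	"fmt"

	"github.com/soniakeys/graph"
)

func main() {
	// Two components: an edge 0-1 and an isolated node 2.
	g := graph.LabeledUndirected{LabeledAdjacencyList: graph.LabeledAdjacencyList{
		0: {{1, 'a'}},
		1: {{0, 'a'}},
		2: nil,
	}}
	fmt.Println(g.IsConnected()) // false
	isTree, allTree := g.IsTree(0)
	fmt.Println(isTree, allTree) // true false: a tree at root 0, but not spanning
	fmt.Println(g.Size())        // 1 edge
}
```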
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index 56efb95b..839e3a64 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -5,9 +5,11 @@ // Package context defines the Context type, which carries deadlines, // cancelation signals, and other request-scoped values across API boundaries // and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. // // Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must +// servers should accept a Context. The chain of function calls between must // propagate the Context, optionally replacing it with a modified copy created // using WithDeadline, WithTimeout, WithCancel, or WithValue. // @@ -16,14 +18,14 @@ // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // -// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// Do not pass a nil Context, even if a function permits it. Pass context.TODO // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and @@ -36,112 +38,15 @@ // Contexts. package context -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out <-chan Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. 
- Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - // Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, +// values, and has no deadline. It is typically used by the main function, // initialization, and tests, and as the top-level Context for incoming // requests. func Background() Context { return background } -// TODO returns a non-nil, empty Context. Code should use context.TODO when +// TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context // parameter). TODO is recognized by static analysis tools that determine @@ -149,8 +54,3 @@ func Background() Context { func TODO() Context { return todo } - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go b/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go deleted file mode 100644 index e3170e33..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package ctxhttp - -import "net/http" - -func canceler(client *http.Client, req *http.Request) func() { - // TODO(djd): Respect any existing value of req.Cancel. 
- ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go deleted file mode 100644 index 56bcbadb..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.5 - -package ctxhttp - -import "net/http" - -type requestCanceler interface { - CancelRequest(*http.Request) -} - -func canceler(client *http.Client, req *http.Request) func() { - rc, ok := client.Transport.(requestCanceler) - if !ok { - return func() {} - } - return func() { - rc.CancelRequest(req) - } -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go index e35860a7..e0df203c 100644 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -1,4 +1,4 @@ -// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,84 +6,35 @@ package ctxhttp import ( + "context" "io" "net/http" "net/url" "strings" - - "golang.org/x/net/context" -) - -func nop() {} - -var ( - testHookContextDoneBeforeHeaders = nop - testHookDoReturned = nop - testHookDidBodyClose = nop ) -// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// // If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { if client == nil { client = http.DefaultClient } - - // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. - cancel := canceler(client, req) - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - // Make local copies of test hooks closed over by goroutines below. - // Prevents data races in tests. - testHookDoReturned := testHookDoReturned - testHookDidBodyClose := testHookDidBodyClose - - go func() { - resp, err := client.Do(req) - testHookDoReturned() - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - testHookContextDoneBeforeHeaders() - cancel() - // Clean up after the goroutine calling client.Do: - go func() { - if r := <-result; r.resp != nil { - testHookDidBodyClose() - r.resp.Body.Close() - } - }() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { select { case <-ctx.Done(): - cancel() - case <-c: - // The response's Body is closed. 
+ err = ctx.Err() + default: } - }() - resp.Body = ¬ifyingReader{resp.Body, c} - - return resp, nil + } + return resp, err } // Get issues a GET request via the Do function. @@ -118,28 +69,3 @@ func Post(ctx context.Context, client *http.Client, url string, bodyType string, func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. -type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index f8cda19a..d20f52b7 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -35,8 +35,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { } // WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned // context's Done channel is closed when the deadline expires, when the returned // cancel function is called, or when the parent context's Done channel is // closed, whichever happens first. diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 00000000..d88bd1db --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go index 5a30acab..0f35592d 100644 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -13,7 +13,7 @@ import ( "time" ) -// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// An emptyCtx is never canceled, has no values, and has no deadline. It is not // struct{}, since vars of this type must have distinct addresses. type emptyCtx int @@ -104,7 +104,7 @@ func propagateCancel(parent Context, child canceler) { } // parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. 
This function understands how each of the concrete types in this +// *cancelCtx. This function understands how each of the concrete types in this // package represents its parent. func parentCancelCtx(parent Context) (*cancelCtx, bool) { for { @@ -134,14 +134,14 @@ func removeChild(parent Context, child canceler) { p.mu.Unlock() } -// A canceler is a context type that can be canceled directly. The +// A canceler is a context type that can be canceled directly. The // implementations are *cancelCtx and *timerCtx. type canceler interface { cancel(removeFromParent bool, err error) Done() <-chan struct{} } -// A cancelCtx can be canceled. When canceled, it also cancels any children +// A cancelCtx can be canceled. When canceled, it also cancels any children // that implement canceler. type cancelCtx struct { Context @@ -193,8 +193,8 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) { } // WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned // context's Done channel is closed when the deadline expires, when the returned // cancel function is called, or when the parent context's Done channel is // closed, whichever happens first. @@ -226,8 +226,8 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { return c, func() { c.cancel(true, Canceled) } } -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then // delegating to cancelCtx.cancel. type timerCtx struct { *cancelCtx @@ -281,7 +281,7 @@ func WithValue(parent Context, key interface{}, val interface{}) Context { return &valueCtx{parent, key, val} } -// A valueCtx carries a key-value pair. It implements Value for that key and +// A valueCtx carries a key-value pair. It implements Value for that key and // delegates all other calls to the embedded Context. type valueCtx struct { Context diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 00000000..b105f80b --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. 
+ // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. 
+type CancelFunc func() diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml index a035125c..fa139db2 100644 --- a/vendor/golang.org/x/oauth2/.travis.yml +++ b/vendor/golang.org/x/oauth2/.travis.yml @@ -1,8 +1,7 @@ language: go go: - - 1.3 - - 1.4 + - tip install: - export GOPATH="$HOME/gopath" diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md index 46aa2b12..dfbed62c 100644 --- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -4,16 +4,15 @@ Go is an open source project. It is the work of hundreds of contributors. We appreciate your help! - ## Filing issues When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. The gophers there will answer or ask you to file an issue if you've tripped over a bug. @@ -23,9 +22,5 @@ The gophers there will answer or ask you to file an issue if you've tripped over Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) before sending patches. -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index d02f24fd..6a66aea5 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The oauth2 Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 0d514173..68f436ed 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -1,6 +1,7 @@ # OAuth2 for Go [![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) +[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) oauth2 package contains a client implementation for OAuth 2.0 spec. @@ -10,6 +11,9 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. go get golang.org/x/oauth2 ~~~~ +Or you can manually git clone the repository to +`$(go env GOPATH)/src/golang.org/x/oauth2`. + See godoc for further documentation and examples. * [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) @@ -18,11 +22,13 @@ See godoc for further documentation and examples. 
## App Engine -In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor +In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor of the [`context.Context`](https://golang.org/x/net/context#Context) type from -the `golang.org/x/net/context` package +the `golang.org/x/net/context` package. Later replaced by the standard `context` package +of the [`context.Context`](https://golang.org/pkg/context#Context) type. + -This means its no longer possible to use the "Classic App Engine" +This means it's no longer possible to use the "Classic App Engine" `appengine.Context` type with the `oauth2` package. (You're using Classic App Engine if you import the package `"appengine"`.) @@ -38,27 +44,43 @@ If you don't want to update your entire app to use the new App Engine packages, you may use both sets of packages in parallel, using only the new packages with the `oauth2` package. - import ( - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - newappengine "google.golang.org/appengine" - newurlfetch "google.golang.org/appengine/urlfetch" - - "appengine" - ) - - func handler(w http.ResponseWriter, r *http.Request) { - var c appengine.Context = appengine.NewContext(r) - c.Infof("Logging a message with the old package") - - var ctx context.Context = newappengine.NewContext(r) - client := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(ctx, "scope"), - Base: &newurlfetch.Transport{Context: ctx}, - }, - } - client.Get("...") +```go +import ( + "context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + newappengine "google.golang.org/appengine" + newurlfetch "google.golang.org/appengine/urlfetch" + + "appengine" +) + +func handler(w http.ResponseWriter, r *http.Request) { + var c appengine.Context = appengine.NewContext(r) + c.Infof("Logging a message with the old package") + + var ctx context.Context = newappengine.NewContext(r) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "scope"), + Base: &newurlfetch.Transport{Context: ctx}, + }, } + client.Get("...") +} +``` + +## Policy for new packages + +We no longer accept new provider-specific packages in this repo. For +defining provider endpoints and provider-specific OAuth2 behavior, we +encourage you to create packages elsewhere. We'll keep the existing +packages for compatibility. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. +The main issue tracker for the oauth2 repository is located at +https://github.com/golang/oauth2/issues. diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go deleted file mode 100644 index 8962c49d..00000000 --- a/vendor/golang.org/x/oauth2/client_appengine.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -// App Engine hooks. 
- -package oauth2 - -import ( - "net/http" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" - "google.golang.org/appengine/urlfetch" -) - -func init() { - internal.RegisterContextClientFunc(contextClientAppEngine) -} - -func contextClientAppEngine(ctx context.Context) (*http.Client, error) { - return urlfetch.Client(ctx), nil -} diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod new file mode 100644 index 00000000..b3457815 --- /dev/null +++ b/vendor/golang.org/x/oauth2/go.mod @@ -0,0 +1,10 @@ +module golang.org/x/oauth2 + +go 1.11 + +require ( + cloud.google.com/go v0.34.0 + golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect + google.golang.org/appengine v1.4.0 +) diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum new file mode 100644 index 00000000..6f0079b0 --- /dev/null +++ b/vendor/golang.org/x/oauth2/go.sum @@ -0,0 +1,12 @@ +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 00000000..feb1157b --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "time" + + "golang.org/x/oauth2" +) + +// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. +var appengineAppIDFunc func(c context.Context) string + +// AppEngineTokenSource returns a token source that fetches tokens from either +// the current application's service account or from the metadata server, +// depending on the App Engine environment. See below for environment-specific +// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that +// involves user accounts, see oauth2.Config instead. 
+// +// First generation App Engine runtimes (<= Go 1.9): +// AppEngineTokenSource returns a token source that fetches tokens issued to the +// current App Engine application's service account. The provided context must have +// come from appengine.NewContext. +// +// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible: +// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the +// flexible environment. It delegates to ComputeTokenSource, and the provided +// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, +// which DefaultTokenSource will use in this case) instead. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + return appEngineTokenSource(ctx, scope...) +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go new file mode 100644 index 00000000..83dacac3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -0,0 +1,77 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// This file applies to App Engine first generation runtimes (<= Go 1.9). + +package google + +import ( + "context" + "sort" + "strings" + "sync" + + "golang.org/x/oauth2" + "google.golang.org/appengine" +) + +func init() { + appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID +} + +// See comment on AppEngineTokenSource in appengine.go. +func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + scopes := append([]string{}, scope...) + sort.Strings(scopes) + return &gaeTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. +var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type gaeTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go new file mode 100644 index 00000000..04c2c221 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. + +package google + +import ( + "context" + "log" + "sync" + + "golang.org/x/oauth2" +) + +var logOnce sync.Once // only spam about deprecation once + +// See comment on AppEngineTokenSource in appengine.go. 
+func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 00000000..5087d845 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,155 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" +) + +// Credentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type Credentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// DefaultCredentials is the old name of Credentials. +// +// Deprecated: use Credentials instead. +type DefaultCredentials = Credentials + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses +// the appengine.AccessToken function. +// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + // First, try the environment variable. 
+ const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, scopes) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { + return creds, nil + } else if !os.IsNotExist(err) { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) + // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) + // and App Engine flexible use ComputeTokenSource and the metadata server. + if appengineTokenFunc != nil { + return &DefaultCredentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, scopes...), + }, nil + } + + // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // or App Engine flexible, use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + return &DefaultCredentials{ + ProjectID: id, + TokenSource: ComputeTokenSource(""), + }, nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) +} + +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + var f credentialsFile + if err := json.Unmarshal(jsonData, &f); err != nil { + return nil, err + } + ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + if err != nil { + return nil, err + } + return &DefaultCredentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + }, nil +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return CredentialsFromJSON(ctx, b, scopes...) +} diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go new file mode 100644 index 00000000..7a961ee2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -0,0 +1,40 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, and Google +// App Engine service accounts. +// +// A brief overview of the package follows. 
For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// +// OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, +// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or +// create an http.Client. +// +// +// Credentials +// +// The Credentials type represents Google credentials, including Application Default +// Credentials. +// +// Use FindDefaultCredentials to obtain Application Default Credentials. +// FindDefaultCredentials looks in some well-known places for a credentials file, and +// will call AppEngineTokenSource or ComputeTokenSource as needed. +// +// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, +// then use the credentials to construct an http.Client or an oauth2.TokenSource. +// +// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats +// described in OAuth2 Configs, above. The TokenSource in the returned value is the +// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or +// JWTConfigFromJSON, but the Credentials may contain additional information +// that is useful is some circumstances. +package google diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 00000000..ca7d208d --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,192 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +// Endpoint is Google's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. 
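ConfigFromJSON, described above, turns a client credentials JSON file into an oauth2.Config. A hedged sketch of the three-legged flow it feeds into; the file name is a placeholder, and drive.DriveScope comes from the vendored google.golang.org/api/drive/v3 package:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	drive "google.golang.org/api/drive/v3"
)

func main() {
	// "credentials.json" is a placeholder for a client credentials file
	// downloaded from the Google API Console.
	b, err := ioutil.ReadFile("credentials.json")
	if err != nil {
		log.Fatal(err)
	}
	cfg, err := google.ConfigFromJSON(b, drive.DriveScope)
	if err != nil {
		log.Fatal(err)
	}
	// Standard three-legged flow: direct the user to the consent URL,
	// then exchange the returned authorization code for a token.
	fmt.Println("Visit:", cfg.AuthCodeURL("state", oauth2.AccessTypeOffline))
}
```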
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) // copy + return f.jwtConfig(scope), nil +} + +// JSON key file types. +const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` // serviceAccountKey or userCredentialsKey + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + + // User Credential fields + // (These typically come from gcloud auth.) + ClientSecret string `json:"client_secret"` + ClientID string `json:"client_id"` + RefreshToken string `json:"refresh_token"` +} + +func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { + cfg := &jwt.Config{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: scopes, + TokenURL: f.TokenURL, + } + if cfg.TokenURL == "" { + cfg.TokenURL = JWTTokenURL + } + return cfg +} + +func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { + switch f.Type { + case serviceAccountKey: + cfg := f.jwtConfig(scopes) + return cfg.TokenSource(ctx), nil + case userCredentialsKey: + cfg := &oauth2.Config{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: scopes, + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: f.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", f.Type) + } +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. 
It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. +func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) +} + +type computeSource struct { + account string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + return &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 00000000..b0fdb3a8 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
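For the service-account case that the comment above recommends, a minimal sketch using JWTConfigFromJSON; the key file name is a placeholder and the Drive service construction assumes the vendored google.golang.org/api/drive/v3 package:

```go
package main

import (
	"context"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
	drive "google.golang.org/api/drive/v3"
)

func main() {
	// "service-account.json" is a placeholder for a service account key file
	// created in the Google API Console.
	b, err := ioutil.ReadFile("service-account.json")
	if err != nil {
		log.Fatal(err)
	}
	conf, err := google.JWTConfigFromJSON(b, drive.DriveScope)
	if err != nil {
		log.Fatal(err)
	}
	// conf.Client performs the two-legged JWT flow against conf.TokenURL.
	client := conf.Client(context.Background())
	svc, err := drive.New(client)
	if err != nil {
		log.Fatal(err)
	}
	_ = svc // use the Drive service with the service account's identity
}
```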
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 00000000..456224bc --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,201 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/oauth2" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
+func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := parseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieve tokens from +// Google Cloud SDK credentials using the provided context. +// It will returns the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. 
+func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +func parseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": {}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +// sdkConfigPath tries to guess where the gcloud config is located. +// It can be overridden during tests. +var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go new file mode 100644 index 00000000..74348718 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import "google.golang.org/appengine/urlfetch" + +func init() { + appengineClientHook = urlfetch.Client +} diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go new file mode 100644 index 00000000..03265e88 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/doc.go @@ -0,0 +1,6 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index fbe1028d..c0ab196c 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -2,18 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. 
package internal import ( - "bufio" "crypto/rsa" "crypto/x509" "encoding/pem" "errors" "fmt" - "io" - "strings" ) // ParseKey converts the binary contents of a private key file @@ -30,7 +26,7 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) { if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) + return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) } } parsed, ok := parsedKey.(*rsa.PrivateKey) @@ -39,38 +35,3 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) { } return parsed, nil } - -func ParseINI(ini io.Reader) (map[string]map[string]string, error) { - result := map[string]map[string]string{ - "": map[string]string{}, // root section - } - scanner := bufio.NewScanner(ini) - currentSection := "" - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, ";") { - // comment. - continue - } - if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { - currentSection = strings.TrimSpace(line[1 : len(line)-1]) - result[currentSection] = map[string]string{} - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) == 2 && parts[0] != "" { - result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) - } - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning ini: %v", err) - } - return result, nil -} - -func CondVal(v string) []string { - if v == "" { - return nil - } - return []string{v} -} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index a6ed3cc7..a831b774 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -2,11 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. package internal import ( + "context" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -17,10 +18,10 @@ import ( "strings" "time" - "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" ) -// Token represents the crendentials used to authorize +// Token represents the credentials used to authorize // the requests to access protected resources on the OAuth 2.0 // provider's backend. 
// @@ -91,6 +92,7 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { var brokenAuthHeaderProviders = []string{ "https://accounts.google.com/", + "https://api.codeswholesale.com/oauth/token", "https://api.dropbox.com/", "https://api.dropboxapi.com/", "https://api.instagram.com/", @@ -99,10 +101,16 @@ var brokenAuthHeaderProviders = []string{ "https://api.pushbullet.com/", "https://api.soundcloud.com/", "https://api.twitch.tv/", + "https://id.twitch.tv/", "https://app.box.com/", + "https://api.box.com/", "https://connect.stripe.com/", + "https://login.mailchimp.com/", "https://login.microsoftonline.com/", "https://login.salesforce.com/", + "https://login.windows.net", + "https://login.live.com/", + "https://login.live-int.com/", "https://oauth.sandbox.trainingpeaks.com/", "https://oauth.trainingpeaks.com/", "https://oauth.vk.com/", @@ -117,6 +125,27 @@ var brokenAuthHeaderProviders = []string{ "https://www.strava.com/oauth/", "https://www.wunderlist.com/oauth/", "https://api.patreon.com/", + "https://sandbox.codeswholesale.com/oauth/token", + "https://api.sipgate.com/v1/authorization/oauth", + "https://api.medium.com/v1/tokens", + "https://log.finalsurge.com/oauth/token", + "https://multisport.todaysplan.com.au/rest/oauth/access_token", + "https://whats.todaysplan.com.au/rest/oauth/access_token", + "https://stackoverflow.com/oauth/access_token", + "https://account.health.nokia.com", + "https://accounts.zoho.com", + "https://gitter.im/login/oauth/token", + "https://openid-connect.onelogin.com/oidc", + "https://api.dailymotion.com/oauth/token", +} + +// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. +var brokenAuthHeaderDomains = []string{ + ".auth0.com", + ".force.com", + ".myshopify.com", + ".okta.com", + ".oktapreview.com", } func RegisterBrokenAuthHeaderProvider(tokenURL string) { @@ -139,6 +168,14 @@ func providerAuthHeaderWorks(tokenURL string) bool { } } + if u, err := url.Parse(tokenURL); err == nil { + for _, s := range brokenAuthHeaderDomains { + if strings.HasSuffix(u.Host, s) { + return false + } + } + } + // Assume the provider implements the spec properly // otherwise. We can add more exceptions as they're // discovered. 
We will _not_ be adding configurable hooks @@ -146,25 +183,25 @@ func providerAuthHeaderWorks(tokenURL string) bool { return true } -func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { - hc, err := ContextClient(ctx) - if err != nil { - return nil, err - } - v.Set("client_id", ClientID) - bustedAuth := !providerAuthHeaderWorks(TokenURL) - if bustedAuth && ClientSecret != "" { - v.Set("client_secret", ClientSecret) +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { + bustedAuth := !providerAuthHeaderWorks(tokenURL) + if bustedAuth { + if clientID != "" { + v.Set("client_id", clientID) + } + if clientSecret != "" { + v.Set("client_secret", clientSecret) + } } - req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") if !bustedAuth { - req.SetBasicAuth(ClientID, ClientSecret) + req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) } - r, err := hc.Do(req) + r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) if err != nil { return nil, err } @@ -174,7 +211,10 @@ func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } if code := r.StatusCode; code < 200 || code > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + return nil, &RetrieveError{ + Response: r, + Body: body, + } } var token *Token @@ -221,5 +261,17 @@ func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, if token.RefreshToken == "" { token.RefreshToken = v.Get("refresh_token") } + if token.AccessToken == "" { + return token, errors.New("oauth2: server response missing access_token") + } return token, nil } + +type RetrieveError struct { + Response *http.Response + Body []byte +} + +func (r *RetrieveError) Error() string { + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index f1f173e3..572074a6 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -2,13 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. package internal import ( + "context" "net/http" - - "golang.org/x/net/context" ) // HTTPClient is the context key to use with golang.org/x/net/context's @@ -20,50 +18,16 @@ var HTTPClient ContextKey // because nobody else can create a ContextKey, being unexported. type ContextKey struct{} -// ContextClientFunc is a func which tries to return an *http.Client -// given a Context value. If it returns an error, the search stops -// with that error. If it returns (nil, nil), the search continues -// down the list of registered funcs. 
-type ContextClientFunc func(context.Context) (*http.Client, error) - -var contextClientFuncs []ContextClientFunc - -func RegisterContextClientFunc(fn ContextClientFunc) { - contextClientFuncs = append(contextClientFuncs, fn) -} +var appengineClientHook func(context.Context) *http.Client -func ContextClient(ctx context.Context) (*http.Client, error) { +func ContextClient(ctx context.Context) *http.Client { if ctx != nil { if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc, nil - } - } - for _, fn := range contextClientFuncs { - c, err := fn(ctx) - if err != nil { - return nil, err - } - if c != nil { - return c, nil + return hc } } - return http.DefaultClient, nil -} - -func ContextTransport(ctx context.Context) http.RoundTripper { - hc, err := ContextClient(ctx) - // This is a rare error case (somebody using nil on App Engine). - if err != nil { - return ErrorTransport{err} + if appengineClientHook != nil { + return appengineClientHook(ctx) } - return hc.Transport -} - -// ErrorTransport returns the specified error on RoundTrip. -// This RoundTripper should be used in rare error cases where -// error handling can be postponed to response handling time. -type ErrorTransport struct{ Err error } - -func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { - return nil, t.Err + return http.DefaultClient } diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 00000000..8bcecb46 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,182 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides a partial implementation +// of JSON Web Signature encoding and decoding. +// It exists to support the golang.org/x/oauth2 package. +// +// See RFC 7515. +// +// Deprecated: this package is not intended for public use and might be +// removed in the future. It exists for internal use only. +// Please switch to another JWS package or copy this package into your own +// source tree. +package jws + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// ClaimSet contains information about the JWT signature including the +// permissions being requested (scopes), the target of the token, the issuer, +// the time the token was issued, and the lifetime of the token. +type ClaimSet struct { + Iss string `json:"iss"` // email address of the client_id of the application making the access token request + Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests + Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). + Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) + Typ string `json:"typ,omitempty"` // token type (Optional). + + // Email for which the application is requesting delegated access (Optional). + Sub string `json:"sub,omitempty"` + + // The old name of Sub. Client keeps setting Prn to be + // complaint with legacy OAuth 2.0 providers. (Optional) + Prn string `json:"prn,omitempty"` + + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + // This array is marshalled using custom code (see (c *ClaimSet) encode()). 
+ PrivateClaims map[string]interface{} `json:"-"` +} + +func (c *ClaimSet) encode() (string, error) { + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` + + // The optional hint of which key is being used. + KeyID string `json:"kid,omitempty"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. + return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. 
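//
// A minimal encode/verify round trip (editor's illustration, not part of
// the vendored upstream file), assuming an *rsa.PrivateKey in priv and the
// crypto/rsa and golang.org/x/oauth2/jws imports:
//
//	hdr := &jws.Header{Algorithm: "RS256", Typ: "JWT"}
//	cs := &jws.ClaimSet{Iss: "issuer@example.com", Aud: "https://example.com/token"}
//	token, err := jws.Encode(hdr, cs, priv)
//	if err != nil {
//		// handle error
//	}
//	if err := jws.Verify(token, &priv.PublicKey); err != nil {
//		// signature was not produced by priv
//	}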
+func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 00000000..0783a94c --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,162 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. 
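//
// For context, a typical two-legged flow with the Config type above
// (editor's illustration, not part of the vendored upstream file; the
// email, key bytes, scope and token URL are placeholders):
//
//	conf := &jwt.Config{
//		Email:      "svc@example.iam.gserviceaccount.com",
//		PrivateKey: pemKey, // PEM-encoded RSA private key bytes
//		Scopes:     []string{"https://www.googleapis.com/auth/drive"},
//		TokenURL:   "https://oauth2.googleapis.com/token",
//	}
//	client := conf.Client(context.Background())
//	_ = client // each request carries a token obtained via the signed JWT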
+type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, &oauth2.RetrieveError{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 9b7b977d..74c3364e 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -3,24 +3,27 @@ // license that can be found in the LICENSE file. // Package oauth2 provides support for making -// OAuth2 authorized and authenticated HTTP requests. +// OAuth2 authorized and authenticated HTTP requests, +// as specified in RFC 6749. // It can additionally grant authorization with Bearer JWT. package oauth2 import ( "bytes" + "context" "errors" "net/http" "net/url" "strings" "sync" - "golang.org/x/net/context" "golang.org/x/oauth2/internal" ) // NoContext is the default context you should supply if not using // your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. var NoContext = context.TODO() // RegisterBrokenAuthHeaderProvider registers an OAuth2 server @@ -28,7 +31,7 @@ var NoContext = context.TODO() // which doesn't support the HTTP Basic authentication // scheme to authenticate with the authorization server. 
// Once a server is registered, credentials (client_id and client_secret) -// will be passed as query parameters rather than being present +// will be passed as parameters in the request body rather than being present // in the Authorization header. // See https://code.google.com/p/goauth2/issues/detail?id=31 for background. func RegisterBrokenAuthHeaderProvider(tokenURL string) { @@ -37,6 +40,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) { // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. +// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). type Config struct { // ClientID is the application's ID. ClientID string @@ -113,21 +118,30 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // that asks for permissions for the required scopes explicitly. // // State is a token to protect the user from CSRF attacks. You must -// always provide a non-zero string and validate that it matches the +// always provide a non-empty string and validate that it matches the // the state query parameter on your redirect callback. // See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. +// It can also be used to pass the PKCE challange. +// See https://www.oauth.com/oauth2-servers/pkce/ for more info. func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) v := url.Values{ "response_type": {"code"}, "client_id": {c.ClientID}, - "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - "state": internal.CondVal(state), + } + if c.RedirectURL != "" { + v.Set("redirect_uri", c.RedirectURL) + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + if state != "" { + // TODO(light): Docs say never to omit state; don't allow empty. + v.Set("state", state) } for _, opt := range opts { opt.setValue(v) @@ -150,15 +164,17 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { // and when other authorization grant types are not available." // See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. // -// The HTTP client to use is derived from the context. -// If nil, http.DefaultClient is used. +// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ + v := url.Values{ "grant_type": {"password"}, "username": {username}, "password": {password}, - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - }) + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + return retrieveToken(ctx, c, v) } // Exchange converts an authorization code into a token. @@ -166,18 +182,25 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // -// The HTTP client to use is derived from the context. -// If a client is not provided via the context, http.DefaultClient is used. +// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. 
// // The code will be in the *http.Request.FormValue("code"). Before // calling Exchange, be sure to validate FormValue("state"). -func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"authorization_code"}, - "code": {code}, - "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - }) +// +// Opts may include the PKCE verifier code if previously used in AuthCodeURL. +// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { + v := url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + } + if c.RedirectURL != "" { + v.Set("redirect_uri", c.RedirectURL) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveToken(ctx, c, v) } // Client returns an HTTP client using the provided token. @@ -288,20 +311,20 @@ var HTTPClient internal.ContextKey // NewClient creates an *http.Client from a Context and TokenSource. // The returned client is not valid beyond the lifetime of the context. // +// Note that if a custom *http.Client is provided via the Context it +// is used only for token acquisition and is not used to configure the +// *http.Client returned from NewClient. +// // As a special case, if src is nil, a non-OAuth2 client is returned // using the provided context. This exists to support related OAuth2 // packages. func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { - c, err := internal.ContextClient(ctx) - if err != nil { - return &http.Client{Transport: internal.ErrorTransport{err}} - } - return c + return internal.ContextClient(ctx) } return &http.Client{ Transport: &Transport{ - Base: internal.ContextTransport(ctx), + Base: internal.ContextClient(ctx).Transport, Source: ReuseTokenSource(nil, src), }, } diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 7a3167f1..ee4be545 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -5,13 +5,14 @@ package oauth2 import ( + "context" + "fmt" "net/http" "net/url" "strconv" "strings" "time" - "golang.org/x/net/context" "golang.org/x/oauth2/internal" ) @@ -20,7 +21,7 @@ import ( // expirations due to client-server time mismatches. const expiryDelta = 10 * time.Second -// Token represents the crendentials used to authorize +// Token represents the credentials used to authorize // the requests to access protected resources on the OAuth 2.0 // provider's backend. // @@ -117,13 +118,16 @@ func (t *Token) Extra(key string) interface{} { return v } +// timeNow is time.Now but pulled out as a variable for tests. +var timeNow = time.Now + // expired reports whether the token is expired. // t must be non-nil. func (t *Token) expired() bool { if t.Expiry.IsZero() { return false } - return t.Expiry.Add(-expiryDelta).Before(time.Now()) + return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) } // Valid reports whether t is non-nil, has an AccessToken, and is not expired. 
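//
// For context, a token is usually obtained and checked like this
// (editor's illustration, not part of the vendored upstream file; the
// client credentials, scope and authorization code are placeholders and the
// snippet assumes the golang.org/x/oauth2 and golang.org/x/oauth2/google
// imports):
//
//	conf := &oauth2.Config{
//		ClientID:     "CLIENT_ID",
//		ClientSecret: "CLIENT_SECRET",
//		RedirectURL:  "urn:ietf:wg:oauth:2.0:oob",
//		Scopes:       []string{"https://www.googleapis.com/auth/drive"},
//		Endpoint:     google.Endpoint,
//	}
//	url := conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
//	// direct the user to url and read the authorization code back as code
//	tok, err := conf.Exchange(context.Background(), code)
//	if err != nil {
//		// handle error
//	}
//	if tok.Valid() {
//		client := conf.Client(context.Background(), tok)
//		_ = client
//	}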
@@ -152,7 +156,23 @@ func tokenFromInternal(t *internal.Token) *Token { func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) if err != nil { + if rErr, ok := err.(*internal.RetrieveError); ok { + return nil, (*RetrieveError)(rErr) + } return nil, err } return tokenFromInternal(tk), nil } + +// RetrieveError is the error returned when the token endpoint returns a +// non-2XX HTTP status code. +type RetrieveError struct { + Response *http.Response + // Body is the body that was consumed by reading Response.Body. + // It may be truncated. + Body []byte +} + +func (r *RetrieveError) Error() string { + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index 92ac7e25..aa0d34f1 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -31,9 +31,17 @@ type Transport struct { } // RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. +// access token from Transport's Source. func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + reqBodyClosed := false + if req.Body != nil { + defer func() { + if !reqBodyClosed { + req.Body.Close() + } + }() + } + if t.Source == nil { return nil, errors.New("oauth2: Transport's Source is nil") } @@ -46,6 +54,10 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { token.SetAuthHeader(req2) t.setModReq(req, req2) res, err := t.base().RoundTrip(req2) + + // req.Body is assumed to have been closed by the base RoundTripper. + reqBodyClosed = true + if err != nil { t.setModReq(req, nil) return nil, err diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS new file mode 100644 index 00000000..f73b7257 --- /dev/null +++ b/vendor/google.golang.org/api/AUTHORS @@ -0,0 +1,10 @@ +# This is the official list of authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. +Google Inc. diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS new file mode 100644 index 00000000..fe55ebff --- /dev/null +++ b/vendor/google.golang.org/api/CONTRIBUTORS @@ -0,0 +1,55 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). 
+# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# https://cla.developers.google.com/about/google-individual +# https://cla.developers.google.com/about/google-corporate +# +# The CLA can be filled out on the web: +# +# https://cla.developers.google.com/ +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name +# +# An entry with two email addresses specifies that the +# first address should be used in the submit logs and +# that the second address should be recognized as the +# same person when interacting with Rietveld. + +# Please keep the list sorted. + +Alain Vongsouvanhalainv +Andrew Gerrand +Brad Fitzpatrick +Eric Koleda +Francesc Campoy +Garrick Evans +Glenn Lewis +Ivan Krasin +Jason Hall +Johan Euphrosine +Kostik Shtoyk +Kunpei Sakai +Matthew Whisenhunt +Michael McGreevy +Nick Craig-Wood +Robbie Trencheny +Ross Light +Sarah Adams +Scott Van Woudenberg +Takashi Matsuo diff --git a/vendor/google.golang.org/api/drive/v3/drive-api.json b/vendor/google.golang.org/api/drive/v3/drive-api.json index 896d44e3..d75ae30f 100644 --- a/vendor/google.golang.org/api/drive/v3/drive-api.json +++ b/vendor/google.golang.org/api/drive/v3/drive-api.json @@ -1,2410 +1,3180 @@ { - "kind": "discovery#restDescription", - "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/O9_NbpoVnW5GMGl7qWBIajcyrt8\"", - "discoveryVersion": "v1", - "id": "drive:v3", - "name": "drive", - "version": "v3", - "revision": "20160303", - "title": "Drive API", - "description": "The API to interact with Drive.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png", - "x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png" - }, - "documentationLink": "https://developers.google.com/drive/", - "protocol": "rest", - "baseUrl": "https://www.googleapis.com/drive/v3/", - "basePath": "/drive/v3/", - "rootUrl": "https://www.googleapis.com/", - "servicePath": "drive/v3/", - "batchPath": "batch", - "parameters": { - "alt": { - "type": "string", - "description": "Data format for the response.", - "default": "json", - "enum": [ - "json" - ], - "enumDescriptions": [ - "Responses with Content-Type of application/json" - ], - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. 
Overrides userIp if both are provided.", - "location": "query" - }, - "userIp": { - "type": "string", - "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/drive": { - "description": "View and manage the files in your Google Drive" - }, - "https://www.googleapis.com/auth/drive.appdata": { - "description": "View and manage its own configuration data in your Google Drive" - }, - "https://www.googleapis.com/auth/drive.file": { - "description": "View and manage Google Drive files and folders that you have opened or created with this app" - }, - "https://www.googleapis.com/auth/drive.metadata": { - "description": "View and manage metadata of files in your Google Drive" - }, - "https://www.googleapis.com/auth/drive.metadata.readonly": { - "description": "View metadata for files in your Google Drive" - }, - "https://www.googleapis.com/auth/drive.photos.readonly": { - "description": "View the photos, videos and albums in your Google Photos" - }, - "https://www.googleapis.com/auth/drive.readonly": { - "description": "View the files in your Google Drive" - }, - "https://www.googleapis.com/auth/drive.scripts": { - "description": "Modify your Google Apps Script scripts' behavior" - } - } - } - }, - "schemas": { - "About": { - "id": "About", - "type": "object", - "description": "Information about the user, the user's Drive, and system capabilities.", - "properties": { - "appInstalled": { - "type": "boolean", - "description": "Whether the user has installed the requesting app." - }, - "exportFormats": { - "type": "object", - "description": "A map of source MIME type to possible targets for all supported exports.", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "folderColorPalette": { - "type": "array", - "description": "The currently supported folder colors as RGB hex strings.", - "items": { - "type": "string" - } - }, - "importFormats": { - "type": "object", - "description": "A map of source MIME type to possible targets for all supported imports.", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "kind": { - "type": "string", - "description": "This is always drive#about.", - "default": "drive#about" - }, - "maxImportSizes": { - "type": "object", - "description": "A map of maximum import sizes by MIME type, in bytes.", - "additionalProperties": { - "type": "string", - "format": "int64" - } - }, - "maxUploadSize": { - "type": "string", - "description": "The maximum upload size in bytes.", - "format": "int64" - }, - "storageQuota": { - "type": "object", - "description": "The user's storage quota limits and usage. All fields are measured in bytes.", - "properties": { - "limit": { - "type": "string", - "description": "The usage limit, if applicable. This will not be present if the user has unlimited storage.", - "format": "int64" - }, - "usage": { - "type": "string", - "description": "The total usage across all services.", - "format": "int64" - }, - "usageInDrive": { - "type": "string", - "description": "The usage by all files in Google Drive.", - "format": "int64" - }, - "usageInDriveTrash": { - "type": "string", - "description": "The usage by trashed files in Google Drive.", - "format": "int64" - } - } - }, - "user": { - "$ref": "User", - "description": "The authenticated user." 
- } - } - }, - "Change": { - "id": "Change", - "type": "object", - "description": "A change to a file.", - "properties": { - "file": { - "$ref": "File", - "description": "The updated state of the file. Present if the file has not been removed." - }, - "fileId": { - "type": "string", - "description": "The ID of the file which has changed." - }, - "kind": { - "type": "string", - "description": "This is always drive#change.", - "default": "drive#change" - }, - "removed": { - "type": "boolean", - "description": "Whether the file has been removed from the view of the changes list, for example by deletion or lost access." - }, - "time": { - "type": "string", - "description": "The time of this change (RFC 3339 date-time).", - "format": "date-time" - } - } - }, - "ChangeList": { - "id": "ChangeList", - "type": "object", - "description": "A list of changes for a user.", - "properties": { - "changes": { - "type": "array", - "description": "The page of changes.", - "items": { - "$ref": "Change" - } - }, - "kind": { - "type": "string", - "description": "This is always drive#changeList.", - "default": "drive#changeList" - }, - "newStartPageToken": { - "type": "string", - "description": "The starting page token for future changes. This will be present only if the end of the current changes list has been reached." - }, - "nextPageToken": { - "type": "string", - "description": "The page token for the next page of changes. This will be absent if the end of the current changes list has been reached." - } - } - }, - "Channel": { - "id": "Channel", - "type": "object", - "description": "An notification channel used to watch for resource changes.", - "properties": { - "address": { - "type": "string", - "description": "The address where notifications are delivered for this channel." - }, - "expiration": { - "type": "string", - "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", - "format": "int64" - }, - "id": { - "type": "string", - "description": "A UUID or similar unique string that identifies this channel." - }, - "kind": { - "type": "string", - "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".", - "default": "api#channel" - }, - "params": { - "type": "object", - "description": "Additional parameters controlling delivery channel behavior. Optional.", - "additionalProperties": { - "type": "string", - "description": "Declares a new parameter by name." - } - }, - "payload": { - "type": "boolean", - "description": "A Boolean value to indicate whether payload is wanted. Optional." - }, - "resourceId": { - "type": "string", - "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions." - }, - "resourceUri": { - "type": "string", - "description": "A version-specific identifier for the watched resource." - }, - "token": { - "type": "string", - "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional." - }, - "type": { - "type": "string", - "description": "The type of delivery mechanism used for this channel." - } - } - }, - "Comment": { - "id": "Comment", - "type": "object", - "description": "A comment on a file.", - "properties": { - "anchor": { - "type": "string", - "description": "A region of the document represented as a JSON string. 
See anchor documentation for details on how to define and interpret anchor properties." - }, - "author": { - "$ref": "User", - "description": "The user who created the comment." - }, - "content": { - "type": "string", - "description": "The plain text content of the comment. This field is used for setting the content, while htmlContent should be displayed.", - "annotations": { - "required": [ - "drive.comments.create", - "drive.comments.update" - ] - } - }, - "createdTime": { - "type": "string", - "description": "The time at which the comment was created (RFC 3339 date-time).", - "format": "date-time" - }, - "deleted": { - "type": "boolean", - "description": "Whether the comment has been deleted. A deleted comment has no content." - }, - "htmlContent": { - "type": "string", - "description": "The content of the comment with HTML formatting." - }, - "id": { - "type": "string", - "description": "The ID of the comment." - }, - "kind": { - "type": "string", - "description": "This is always drive#comment.", - "default": "drive#comment" - }, - "modifiedTime": { - "type": "string", - "description": "The last time the comment or any of its replies was modified (RFC 3339 date-time).", - "format": "date-time" - }, - "quotedFileContent": { - "type": "object", - "description": "The file content to which the comment refers, typically within the anchor region. For a text file, for example, this would be the text at the location of the comment.", - "properties": { - "mimeType": { - "type": "string", - "description": "The MIME type of the quoted content." - }, - "value": { - "type": "string", - "description": "The quoted content itself. This is interpreted as plain text if set through the API." + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/drive": { + "description": "See, edit, create, and delete all of your Google Drive files" + }, + "https://www.googleapis.com/auth/drive.appdata": { + "description": "View and manage its own configuration data in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.file": { + "description": "View and manage Google Drive files and folders that you have opened or created with this app" + }, + "https://www.googleapis.com/auth/drive.metadata": { + "description": "View and manage metadata of files in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.metadata.readonly": { + "description": "View metadata for files in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.photos.readonly": { + "description": "View the photos, videos and albums in your Google Photos" + }, + "https://www.googleapis.com/auth/drive.readonly": { + "description": "See and download all your Google Drive files" + }, + "https://www.googleapis.com/auth/drive.scripts": { + "description": "Modify your Google Apps Script scripts' behavior" + } } - } - }, - "replies": { - "type": "array", - "description": "The full list of replies to the comment in chronological order.", - "items": { - "$ref": "Reply" - } - }, - "resolved": { - "type": "boolean", - "description": "Whether the comment has been resolved by one of its replies." 
} - } }, - "CommentList": { - "id": "CommentList", - "type": "object", - "description": "A list of comments on a file.", - "properties": { - "comments": { - "type": "array", - "description": "The page of comments.", - "items": { - "$ref": "Comment" - } - }, - "kind": { - "type": "string", - "description": "This is always drive#commentList.", - "default": "drive#commentList" - }, - "nextPageToken": { - "type": "string", - "description": "The page token for the next page of comments. This will be absent if the end of the comments list has been reached." - } - } + "basePath": "/drive/v3/", + "baseUrl": "https://www.googleapis.com/drive/v3/", + "batchPath": "batch/drive/v3", + "description": "Manages files in Drive including uploading, downloading, searching, detecting changes, and updating sharing permissions.", + "discoveryVersion": "v1", + "documentationLink": "https://developers.google.com/drive/", + "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/DmsVwHXLUfZJqrDE1d-9tgSUvKk\"", + "icons": { + "x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png", + "x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png" }, - "File": { - "id": "File", - "type": "object", - "description": "The metadata for a file.", - "properties": { - "appProperties": { - "type": "object", - "description": "A collection of arbitrary key-value pairs which are private to the requesting app.\nEntries with null values are cleared in update and copy requests.", - "additionalProperties": { + "id": "drive:v3", + "kind": "discovery#restDescription", + "name": "drive", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "alt": { + "default": "json", + "description": "Data format for the response.", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query", "type": "string" - } - }, - "capabilities": { - "type": "object", - "description": "Capabilities the current user has on the file.", - "properties": { - "canComment": { - "type": "boolean", - "description": "Whether the user can comment on the file." - }, - "canCopy": { - "type": "boolean", - "description": "Whether the user can copy the file." - }, - "canEdit": { - "type": "boolean", - "description": "Whether the user can edit the file's content." - }, - "canReadRevisions": { - "type": "boolean", - "description": "Whether the current user has read access to the Revisions resource of the file." - }, - "canShare": { - "type": "boolean", - "description": "Whether the user can modify the file's permissions and sharing settings." - } - } }, - "contentHints": { - "type": "object", - "description": "Additional information about the content of the file. These fields are never populated in responses.", - "properties": { - "indexableText": { - "type": "string", - "description": "Text to be indexed for the file to improve fullText queries. This is limited to 128KB in length and may contain HTML elements." - }, - "thumbnail": { - "type": "object", - "description": "A thumbnail for the file. This will only be used if Drive cannot generate a standard thumbnail.", - "properties": { - "image": { - "type": "string", - "description": "The thumbnail data encoded with URL-safe Base64 (RFC 4648 section 5).", - "format": "byte" - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the thumbnail." 
- } - } - } - } - }, - "createdTime": { - "type": "string", - "description": "The time at which the file was created (RFC 3339 date-time).", - "format": "date-time" - }, - "description": { - "type": "string", - "description": "A short description of the file." - }, - "explicitlyTrashed": { - "type": "boolean", - "description": "Whether the file has been explicitly trashed, as opposed to recursively trashed from a parent folder." - }, - "fileExtension": { - "type": "string", - "description": "The final component of fullFileExtension. This is only available for files with binary content in Drive." - }, - "folderColorRgb": { - "type": "string", - "description": "The color for a folder as an RGB hex string. The supported colors are published in the folderColorPalette field of the About resource.\nIf an unsupported color is specified, the closest color in the palette will be used instead." - }, - "fullFileExtension": { - "type": "string", - "description": "The full file extension extracted from the name field. May contain multiple concatenated extensions, such as \"tar.gz\". This is only available for files with binary content in Drive.\nThis is automatically updated when the name field changes, however it is not cleared if the new name does not contain a valid extension." - }, - "headRevisionId": { - "type": "string", - "description": "The ID of the file's head revision. This is currently only available for files with binary content in Drive." - }, - "iconLink": { - "type": "string", - "description": "A static, unauthenticated link to the file's icon." - }, - "id": { - "type": "string", - "description": "The ID of the file." - }, - "imageMediaMetadata": { - "type": "object", - "description": "Additional metadata about image media, if available.", - "properties": { - "aperture": { - "type": "number", - "description": "The aperture used to create the photo (f-number).", - "format": "float" - }, - "cameraMake": { - "type": "string", - "description": "The make of the camera used to create the photo." - }, - "cameraModel": { - "type": "string", - "description": "The model of the camera used to create the photo." - }, - "colorSpace": { - "type": "string", - "description": "The color space of the photo." - }, - "exposureBias": { - "type": "number", - "description": "The exposure bias of the photo (APEX value).", - "format": "float" - }, - "exposureMode": { - "type": "string", - "description": "The exposure mode used to create the photo." - }, - "exposureTime": { - "type": "number", - "description": "The length of the exposure, in seconds.", - "format": "float" - }, - "flashUsed": { - "type": "boolean", - "description": "Whether a flash was used to create the photo." - }, - "focalLength": { - "type": "number", - "description": "The focal length used to create the photo, in millimeters.", - "format": "float" - }, - "height": { - "type": "integer", - "description": "The height of the image in pixels.", - "format": "int32" - }, - "isoSpeed": { - "type": "integer", - "description": "The ISO speed used to create the photo.", - "format": "int32" - }, - "lens": { - "type": "string", - "description": "The lens used to create the photo." 
- }, - "location": { - "type": "object", - "description": "Geographic location information stored in the image.", - "properties": { - "altitude": { - "type": "number", - "description": "The altitude stored in the image.", - "format": "double" - }, - "latitude": { - "type": "number", - "description": "The latitude stored in the image.", - "format": "double" - }, - "longitude": { - "type": "number", - "description": "The longitude stored in the image.", - "format": "double" - } - } - }, - "maxApertureValue": { - "type": "number", - "description": "The smallest f-number of the lens at the focal length used to create the photo (APEX value).", - "format": "float" - }, - "meteringMode": { - "type": "string", - "description": "The metering mode used to create the photo." - }, - "rotation": { - "type": "integer", - "description": "The rotation in clockwise degrees from the image's original orientation.", - "format": "int32" - }, - "sensor": { - "type": "string", - "description": "The type of sensor used to create the photo." - }, - "subjectDistance": { - "type": "integer", - "description": "The distance to the subject of the photo, in meters.", - "format": "int32" - }, - "time": { - "type": "string", - "description": "The date and time the photo was taken (EXIF DateTime)." - }, - "whiteBalance": { - "type": "string", - "description": "The white balance mode used to create the photo." - }, - "width": { - "type": "integer", - "description": "The width of the image in pixels.", - "format": "int32" - } - } - }, - "kind": { - "type": "string", - "description": "This is always drive#file.", - "default": "drive#file" - }, - "lastModifyingUser": { - "$ref": "User", - "description": "The last user to modify the file." - }, - "md5Checksum": { - "type": "string", - "description": "The MD5 checksum for the content of the file. This is only applicable to files with binary content in Drive." - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the file.\nDrive will attempt to automatically detect an appropriate value from uploaded content if no value is provided. The value cannot be changed unless a new revision is uploaded.\nIf a file is created with a Google Doc MIME type, the uploaded content will be imported if possible. The supported import formats are published in the About resource." - }, - "modifiedByMeTime": { - "type": "string", - "description": "The last time the file was modified by the user (RFC 3339 date-time).", - "format": "date-time" - }, - "modifiedTime": { - "type": "string", - "description": "The last time the file was modified by anyone (RFC 3339 date-time).\nNote that setting modifiedTime will also update modifiedByMeTime for the user.", - "format": "date-time" - }, - "name": { - "type": "string", - "description": "The name of the file. This is not necessarily unique within a folder." - }, - "originalFilename": { - "type": "string", - "description": "The original filename of the uploaded content if available, or else the original value of the name field. This is only available for files with binary content in Drive." - }, - "ownedByMe": { - "type": "boolean", - "description": "Whether the user owns the file." - }, - "owners": { - "type": "array", - "description": "The owners of the file. 
Currently, only certain legacy files may have more than one owner.", - "items": { - "$ref": "User" - } - }, - "parents": { - "type": "array", - "description": "The IDs of the parent folders which contain the file.\nIf not specified as part of a create request, the file will be placed directly in the My Drive folder. Update requests must use the addParents and removeParents parameters to modify the values.", - "items": { + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", "type": "string" - } - }, - "permissions": { - "type": "array", - "description": "The full list of permissions for the file. This is only available if the requesting user can share the file.", - "items": { - "$ref": "Permission" - } }, - "properties": { - "type": "object", - "description": "A collection of arbitrary key-value pairs which are visible to all apps.\nEntries with null values are cleared in update and copy requests.", - "additionalProperties": { + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", "type": "string" - } - }, - "quotaBytesUsed": { - "type": "string", - "description": "The number of storage quota bytes used by the file. This includes the head revision as well as previous revisions with keepForever enabled.", - "format": "int64" }, - "shared": { - "type": "boolean", - "description": "Whether the file has been shared." - }, - "sharedWithMeTime": { - "type": "string", - "description": "The time at which the file was shared with the user, if applicable (RFC 3339 date-time).", - "format": "date-time" - }, - "sharingUser": { - "$ref": "User", - "description": "The user who shared the file with the requesting user, if applicable." - }, - "size": { - "type": "string", - "description": "The size of the file's content in bytes. This is only applicable to files with binary content in Drive.", - "format": "int64" - }, - "spaces": { - "type": "array", - "description": "The list of spaces which contain the file. The currently supported values are 'drive', 'appDataFolder' and 'photos'.", - "items": { + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", "type": "string" - } - }, - "starred": { - "type": "boolean", - "description": "Whether the user has starred the file." - }, - "thumbnailLink": { - "type": "string", - "description": "A short-lived link to the file's thumbnail, if available. Typically lasts on the order of hours." - }, - "trashed": { - "type": "boolean", - "description": "Whether the file has been trashed, either explicitly or from a trashed parent folder. Only the owner may trash a file, and other users cannot see files in the owner's trash." - }, - "version": { - "type": "string", - "description": "A monotonically increasing version number for the file. This reflects every change made to the file on the server, even those not visible to the user.", - "format": "int64" - }, - "videoMediaMetadata": { - "type": "object", - "description": "Additional metadata about video media. 
This may not be available immediately upon upload.", - "properties": { - "durationMillis": { - "type": "string", - "description": "The duration of the video in milliseconds.", - "format": "int64" - }, - "height": { - "type": "integer", - "description": "The height of the video in pixels.", - "format": "int32" - }, - "width": { - "type": "integer", - "description": "The width of the video in pixels.", - "format": "int32" - } - } - }, - "viewedByMe": { - "type": "boolean", - "description": "Whether the file has been viewed by this user." - }, - "viewedByMeTime": { - "type": "string", - "description": "The last time the file was viewed by the user (RFC 3339 date-time).", - "format": "date-time" }, - "viewersCanCopyContent": { - "type": "boolean", - "description": "Whether users with only reader or commenter permission can copy the file's content. This affects copy, download, and print operations." + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" }, - "webContentLink": { - "type": "string", - "description": "A link for downloading the content of the file in a browser. This is only available for files with binary content in Drive." - }, - "webViewLink": { - "type": "string", - "description": "A link for opening the file in a relevant Google editor or viewer in a browser." - }, - "writersCanShare": { - "type": "boolean", - "description": "Whether users with only writer permission can modify the file's permissions." - } - } - }, - "FileList": { - "id": "FileList", - "type": "object", - "description": "A list of files.", - "properties": { - "files": { - "type": "array", - "description": "The page of files.", - "items": { - "$ref": "File" - } - }, - "kind": { - "type": "string", - "description": "This is always drive#fileList.", - "default": "drive#fileList" - }, - "nextPageToken": { - "type": "string", - "description": "The page token for the next page of files. This will be absent if the end of the files list has been reached." - } - } - }, - "GeneratedIds": { - "id": "GeneratedIds", - "type": "object", - "description": "A list of generated file IDs which can be provided in create requests.", - "properties": { - "ids": { - "type": "array", - "description": "The IDs generated for the requesting user in the specified space.", - "items": { + "quotaUser": { + "description": "An opaque string that represents a user for quota purposes. Must not exceed 40 characters.", + "location": "query", "type": "string" - } - }, - "kind": { - "type": "string", - "description": "This is always drive#generatedIds", - "default": "drive#generatedIds" - }, - "space": { - "type": "string", - "description": "The type of file that can be created with these IDs." - } - } - }, - "Permission": { - "id": "Permission", - "type": "object", - "description": "A permission for a file. A permission grants a user, group, domain or the world access to a file or a folder hierarchy.", - "properties": { - "allowFileDiscovery": { - "type": "boolean", - "description": "Whether the permission allows the file to be discovered through search. This is only applicable for permissions of type domain or anyone." - }, - "displayName": { - "type": "string", - "description": "A displayable name for users, groups or domains." - }, - "domain": { - "type": "string", - "description": "The domain to which this permission refers." 
- }, - "emailAddress": { - "type": "string", - "description": "The email address of the user or group to which this permission refers." - }, - "id": { - "type": "string", - "description": "The ID of this permission. This is a unique identifier for the grantee, and is published in User resources as permissionId." - }, - "kind": { - "type": "string", - "description": "This is always drive#permission.", - "default": "drive#permission" - }, - "photoLink": { - "type": "string", - "description": "A link to the user's profile photo, if available." - }, - "role": { - "type": "string", - "description": "The role granted by this permission. Valid values are: \n- owner \n- writer \n- commenter \n- reader", - "annotations": { - "required": [ - "drive.permissions.create" - ] - } - }, - "type": { - "type": "string", - "description": "The type of the grantee. Valid values are: \n- user \n- group \n- domain \n- anyone", - "annotations": { - "required": [ - "drive.permissions.create" - ] - } - } - } - }, - "PermissionList": { - "id": "PermissionList", - "type": "object", - "description": "A list of permissions for a file.", - "properties": { - "kind": { - "type": "string", - "description": "This is always drive#permissionList.", - "default": "drive#permissionList" - }, - "permissions": { - "type": "array", - "description": "The full list of permissions.", - "items": { - "$ref": "Permission" - } - } - } - }, - "Reply": { - "id": "Reply", - "type": "object", - "description": "A reply to a comment on a file.", - "properties": { - "action": { - "type": "string", - "description": "The action the reply performed to the parent comment. Valid values are: \n- resolve \n- reopen" - }, - "author": { - "$ref": "User", - "description": "The user who created the reply." - }, - "content": { - "type": "string", - "description": "The plain text content of the reply. This field is used for setting the content, while htmlContent should be displayed. This is required on creates if no action is specified.", - "annotations": { - "required": [ - "drive.replies.update" - ] - } - }, - "createdTime": { - "type": "string", - "description": "The time at which the reply was created (RFC 3339 date-time).", - "format": "date-time" - }, - "deleted": { - "type": "boolean", - "description": "Whether the reply has been deleted. A deleted reply has no content." - }, - "htmlContent": { - "type": "string", - "description": "The content of the reply with HTML formatting." - }, - "id": { - "type": "string", - "description": "The ID of the reply." - }, - "kind": { - "type": "string", - "description": "This is always drive#reply.", - "default": "drive#reply" - }, - "modifiedTime": { - "type": "string", - "description": "The last time the reply was modified (RFC 3339 date-time).", - "format": "date-time" - } - } - }, - "ReplyList": { - "id": "ReplyList", - "type": "object", - "description": "A list of replies to a comment on a file.", - "properties": { - "kind": { - "type": "string", - "description": "This is always drive#replyList.", - "default": "drive#replyList" - }, - "nextPageToken": { - "type": "string", - "description": "The page token for the next page of replies. This will be absent if the end of the replies list has been reached." 
- }, - "replies": { - "type": "array", - "description": "The page of replies.", - "items": { - "$ref": "Reply" - } - } - } - }, - "Revision": { - "id": "Revision", - "type": "object", - "description": "The metadata for a revision to a file.", - "properties": { - "id": { - "type": "string", - "description": "The ID of the revision." - }, - "keepForever": { - "type": "boolean", - "description": "Whether to keep this revision forever, even if it is no longer the head revision. If not set, the revision will be automatically purged 30 days after newer content is uploaded. This can be set on a maximum of 200 revisions for a file.\nThis field is only applicable to files with binary content in Drive." - }, - "kind": { - "type": "string", - "description": "This is always drive#revision.", - "default": "drive#revision" - }, - "lastModifyingUser": { - "$ref": "User", - "description": "The last user to modify this revision." - }, - "md5Checksum": { - "type": "string", - "description": "The MD5 checksum of the revision's content. This is only applicable to files with binary content in Drive." - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the revision." - }, - "modifiedTime": { - "type": "string", - "description": "The last time the revision was modified (RFC 3339 date-time).", - "format": "date-time" - }, - "originalFilename": { - "type": "string", - "description": "The original filename used to create this revision. This is only applicable to files with binary content in Drive." - }, - "publishAuto": { - "type": "boolean", - "description": "Whether subsequent revisions will be automatically republished. This is only applicable to Google Docs." - }, - "published": { - "type": "boolean", - "description": "Whether this revision is published. This is only applicable to Google Docs." - }, - "publishedOutsideDomain": { - "type": "boolean", - "description": "Whether this revision is published outside the domain. This is only applicable to Google Docs." - }, - "size": { - "type": "string", - "description": "The size of the revision's content in bytes. This is only applicable to files with binary content in Drive.", - "format": "int64" - } - } - }, - "RevisionList": { - "id": "RevisionList", - "type": "object", - "description": "A list of revisions of a file.", - "properties": { - "kind": { - "type": "string", - "description": "This is always drive#revisionList.", - "default": "drive#revisionList" }, - "revisions": { - "type": "array", - "description": "The full list of revisions.", - "items": { - "$ref": "Revision" - } - } - } - }, - "StartPageToken": { - "id": "StartPageToken", - "type": "object", - "properties": { - "kind": { - "type": "string", - "description": "This is always drive#startPageToken.", - "default": "drive#startPageToken" - }, - "startPageToken": { - "type": "string", - "description": "The starting page token for listing changes." - } - } - }, - "User": { - "id": "User", - "type": "object", - "description": "Information about a Drive user.", - "properties": { - "displayName": { - "type": "string", - "description": "A plain text displayable name for this user." - }, - "emailAddress": { - "type": "string", - "description": "The email address of the user. This may not be present in certain contexts if the user has not made their email address visible to the requester." 
- }, - "kind": { - "type": "string", - "description": "This is always drive#user.", - "default": "drive#user" - }, - "me": { - "type": "boolean", - "description": "Whether this user is the requesting user." - }, - "permissionId": { - "type": "string", - "description": "The user's ID as visible in Permission resources." - }, - "photoLink": { - "type": "string", - "description": "A link to the user's profile photo, if available." - } - } - } - }, - "resources": { - "about": { - "methods": { - "get": { - "id": "drive.about.get", - "path": "about", - "httpMethod": "GET", - "description": "Gets information about the user, the user's Drive, and system capabilities.", - "response": { - "$ref": "About" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ] - } - } - }, - "changes": { - "methods": { - "getStartPageToken": { - "id": "drive.changes.getStartPageToken", - "path": "changes/startPageToken", - "httpMethod": "GET", - "description": "Gets the starting pageToken for listing future changes.", - "response": { - "$ref": "StartPageToken" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ] - }, - "list": { - "id": "drive.changes.list", - "path": "changes", - "httpMethod": "GET", - "description": "Lists changes for a user.", - "parameters": { - "includeRemoved": { - "type": "boolean", - "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", - "default": "true", - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "The maximum number of changes to return per page.", - "default": "100", - "format": "int32", - "minimum": "1", - "maximum": "1000", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", - "required": true, - "location": "query" - }, - "restrictToMyDrive": { - "type": "boolean", - "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", - "default": "false", - "location": "query" - }, - "spaces": { - "type": "string", - "description": "A comma-separated list of spaces to query within the user corpus. 
Supported values are 'drive', 'appDataFolder' and 'photos'.", - "default": "drive", - "location": "query" - } - }, - "parameterOrder": [ - "pageToken" - ], - "response": { - "$ref": "ChangeList" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ], - "supportsSubscription": true - }, - "watch": { - "id": "drive.changes.watch", - "path": "changes/watch", - "httpMethod": "POST", - "description": "Subscribes to changes for a user.", - "parameters": { - "includeRemoved": { - "type": "boolean", - "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", - "default": "true", - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "The maximum number of changes to return per page.", - "default": "100", - "format": "int32", - "minimum": "1", - "maximum": "1000", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", - "required": true, - "location": "query" - }, - "restrictToMyDrive": { - "type": "boolean", - "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", - "default": "false", - "location": "query" - }, - "spaces": { - "type": "string", - "description": "A comma-separated list of spaces to query within the user corpus. 
Supported values are 'drive', 'appDataFolder' and 'photos'.", - "default": "drive", - "location": "query" - } - }, - "parameterOrder": [ - "pageToken" - ], - "request": { - "$ref": "Channel", - "parameterName": "resource" - }, - "response": { - "$ref": "Channel" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ], - "supportsSubscription": true - } - } - }, - "channels": { - "methods": { - "stop": { - "id": "drive.channels.stop", - "path": "channels/stop", - "httpMethod": "POST", - "description": "Stop watching resources through this channel", - "request": { - "$ref": "Channel", - "parameterName": "resource" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ] - } - } - }, - "comments": { - "methods": { - "create": { - "id": "drive.comments.create", - "path": "files/{fileId}/comments", - "httpMethod": "POST", - "description": "Creates a new comment on a file.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId" - ], - "request": { - "$ref": "Comment" - }, - "response": { - "$ref": "Comment" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file" - ] - }, - "delete": { - "id": "drive.comments.delete", - "path": "files/{fileId}/comments/{commentId}", - "httpMethod": "DELETE", - "description": "Deletes a comment.", - "parameters": { - "commentId": { - "type": "string", - "description": "The ID of the comment.", - "required": true, - "location": "path" - }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId", - "commentId" - ], - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file" - ] - }, - "get": { - "id": "drive.comments.get", - "path": "files/{fileId}/comments/{commentId}", - "httpMethod": "GET", - "description": "Gets a comment by ID.", - "parameters": { - "commentId": { - "type": "string", - "description": "The ID of the comment.", - "required": true, - "location": "path" - }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - }, - "includeDeleted": { - "type": "boolean", - "description": "Whether to return deleted comments. 
Deleted comments will not include their original content.", - "default": "false", - "location": "query" - } - }, - "parameterOrder": [ - "fileId", - "commentId" - ], - "response": { - "$ref": "Comment" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.readonly" - ] - }, - "list": { - "id": "drive.comments.list", - "path": "files/{fileId}/comments", - "httpMethod": "GET", - "description": "Lists a file's comments.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - }, - "includeDeleted": { - "type": "boolean", - "description": "Whether to include deleted comments. Deleted comments will not include their original content.", - "default": "false", - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "The maximum number of comments to return per page.", - "default": "20", - "format": "int32", - "minimum": "1", - "maximum": "100", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", - "location": "query" - }, - "startModifiedTime": { - "type": "string", - "description": "The minimum value of 'modifiedTime' for the result comments (RFC 3339 date-time).", - "location": "query" - } - }, - "parameterOrder": [ - "fileId" - ], - "response": { - "$ref": "CommentList" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.readonly" - ] - }, - "update": { - "id": "drive.comments.update", - "path": "files/{fileId}/comments/{commentId}", - "httpMethod": "PATCH", - "description": "Updates a comment with patch semantics.", - "parameters": { - "commentId": { - "type": "string", - "description": "The ID of the comment.", - "required": true, - "location": "path" - }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId", - "commentId" - ], - "request": { - "$ref": "Comment" - }, - "response": { - "$ref": "Comment" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file" - ] + "userIp": { + "description": "Deprecated. Please use quotaUser instead.", + "location": "query", + "type": "string" } - } }, - "files": { - "methods": { - "copy": { - "id": "drive.files.copy", - "path": "files/{fileId}/copy", - "httpMethod": "POST", - "description": "Creates a copy of a file and applies any requested updates with patch semantics.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - }, - "ignoreDefaultVisibility": { - "type": "boolean", - "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.", - "default": "false", - "location": "query" - }, - "keepRevisionForever": { - "type": "boolean", - "description": "Whether to set the 'keepForever' field in the new head revision. 
This is only applicable to files with binary content in Drive.", - "default": "false", - "location": "query" - }, - "ocrLanguage": { - "type": "string", - "description": "A language hint for OCR processing during image import (ISO 639-1 code).", - "location": "query" + "protocol": "rest", + "resources": { + "about": { + "methods": { + "get": { + "description": "Gets information about the user, the user's Drive, and system capabilities.", + "httpMethod": "GET", + "id": "drive.about.get", + "path": "about", + "response": { + "$ref": "About" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + } } - }, - "parameterOrder": [ - "fileId" - ], - "request": { - "$ref": "File" - }, - "response": { - "$ref": "File" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.photos.readonly" - ] }, - "create": { - "id": "drive.files.create", - "path": "files", - "httpMethod": "POST", - "description": "Creates a new file.", - "parameters": { - "ignoreDefaultVisibility": { - "type": "boolean", - "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.", - "default": "false", - "location": "query" - }, - "keepRevisionForever": { - "type": "boolean", - "description": "Whether to set the 'keepForever' field in the new head revision. 
This is only applicable to files with binary content in Drive.", - "default": "false", - "location": "query" - }, - "ocrLanguage": { - "type": "string", - "description": "A language hint for OCR processing during image import (ISO 639-1 code).", - "location": "query" - }, - "useContentAsIndexableText": { - "type": "boolean", - "description": "Whether to use the uploaded content as indexable text.", - "default": "false", - "location": "query" - } - }, - "request": { - "$ref": "File" - }, - "response": { - "$ref": "File" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file" - ], - "supportsMediaUpload": true, - "mediaUpload": { - "accept": [ - "*/*" - ], - "maxSize": "5120GB", - "protocols": { - "simple": { - "multipart": true, - "path": "/upload/drive/v3/files" - }, - "resumable": { - "multipart": true, - "path": "/resumable/upload/drive/v3/files" - } + "changes": { + "methods": { + "getStartPageToken": { + "description": "Gets the starting pageToken for listing future changes.", + "httpMethod": "GET", + "id": "drive.changes.getStartPageToken", + "parameters": { + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + }, + "teamDriveId": { + "description": "The ID of the Team Drive for which the starting pageToken for listing future changes from that Team Drive will be returned.", + "location": "query", + "type": "string" + } + }, + "path": "changes/startPageToken", + "response": { + "$ref": "StartPageToken" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "description": "Lists the changes for a user or Team Drive.", + "httpMethod": "GET", + "id": "drive.changes.list", + "parameterOrder": [ + "pageToken" + ], + "parameters": { + "includeCorpusRemovals": { + "default": "false", + "description": "Whether changes should include the file resource if the file is still accessible by the user at the time of the request, even when a file was removed from the list of changes and there will be no further change entries for this file.", + "location": "query", + "type": "boolean" + }, + "includeRemoved": { + "default": "true", + "description": "Whether to include changes indicating that items have been removed from the list of changes, for example by deletion or loss of access.", + "location": "query", + "type": "boolean" + }, + "includeTeamDriveItems": { + "default": "false", + "description": "Whether Team Drive files or changes should be included in results.", + "location": "query", + "type": "boolean" + }, + "pageSize": { + "default": "100", + "description": "The maximum number of changes to return per page.", + "format": "int32", + "location": "query", + "maximum": "1000", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "The token for continuing a previous list request on the next page. 
This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", + "location": "query", + "required": true, + "type": "string" + }, + "restrictToMyDrive": { + "default": "false", + "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", + "location": "query", + "type": "boolean" + }, + "spaces": { + "default": "drive", + "description": "A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.", + "location": "query", + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + }, + "teamDriveId": { + "description": "The Team Drive from which changes will be returned. If specified the change IDs will be reflective of the Team Drive; use the combined Team Drive ID and change ID as an identifier.", + "location": "query", + "type": "string" + } + }, + "path": "changes", + "response": { + "$ref": "ChangeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsSubscription": true + }, + "watch": { + "description": "Subscribes to changes for a user.", + "httpMethod": "POST", + "id": "drive.changes.watch", + "parameterOrder": [ + "pageToken" + ], + "parameters": { + "includeCorpusRemovals": { + "default": "false", + "description": "Whether changes should include the file resource if the file is still accessible by the user at the time of the request, even when a file was removed from the list of changes and there will be no further change entries for this file.", + "location": "query", + "type": "boolean" + }, + "includeRemoved": { + "default": "true", + "description": "Whether to include changes indicating that items have been removed from the list of changes, for example by deletion or loss of access.", + "location": "query", + "type": "boolean" + }, + "includeTeamDriveItems": { + "default": "false", + "description": "Whether Team Drive files or changes should be included in results.", + "location": "query", + "type": "boolean" + }, + "pageSize": { + "default": "100", + "description": "The maximum number of changes to return per page.", + "format": "int32", + "location": "query", + "maximum": "1000", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", + "location": "query", + "required": true, + "type": "string" + }, + "restrictToMyDrive": { + "default": "false", + "description": "Whether to restrict the results to changes inside the My Drive hierarchy. 
This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", + "location": "query", + "type": "boolean" + }, + "spaces": { + "default": "drive", + "description": "A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.", + "location": "query", + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + }, + "teamDriveId": { + "description": "The Team Drive from which changes will be returned. If specified the change IDs will be reflective of the Team Drive; use the combined Team Drive ID and change ID as an identifier.", + "location": "query", + "type": "string" + } + }, + "path": "changes/watch", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsSubscription": true + } } - }, - "supportsSubscription": true }, - "delete": { - "id": "drive.files.delete", - "path": "files/{fileId}", - "httpMethod": "DELETE", - "description": "Permanently deletes a file owned by the user without moving it to the trash. If the target is a folder, all descendants owned by the user are also deleted.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "channels": { + "methods": { + "stop": { + "description": "Stop watching resources through this channel", + "httpMethod": "POST", + "id": "drive.channels.stop", + "path": "channels/stop", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + } } - }, - "parameterOrder": [ - "fileId" - ], - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file" - ] }, - "emptyTrash": { - "id": "drive.files.emptyTrash", - "path": "files/trash", - "httpMethod": "DELETE", - "description": "Permanently deletes all of the user's trashed files.", - "scopes": [ - "https://www.googleapis.com/auth/drive" - ] - }, - "export": { - "id": "drive.files.export", - "path": "files/{fileId}/export", - "httpMethod": "GET", - "description": "Exports a Google Doc to the requested MIME type and returns the exported content.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the format requested for this export.", - "required": true, - "location": "query" + "comments": { + "methods": { + "create": { + "description": "Creates a new comment on a file.", + 
"httpMethod": "POST", + "id": "drive.comments.create", + "parameterOrder": [ + "fileId" + ], + "parameters": { + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "files/{fileId}/comments", + "request": { + "$ref": "Comment" + }, + "response": { + "$ref": "Comment" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "delete": { + "description": "Deletes a comment.", + "httpMethod": "DELETE", + "id": "drive.comments.delete", + "parameterOrder": [ + "fileId", + "commentId" + ], + "parameters": { + "commentId": { + "description": "The ID of the comment.", + "location": "path", + "required": true, + "type": "string" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "files/{fileId}/comments/{commentId}", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "description": "Gets a comment by ID.", + "httpMethod": "GET", + "id": "drive.comments.get", + "parameterOrder": [ + "fileId", + "commentId" + ], + "parameters": { + "commentId": { + "description": "The ID of the comment.", + "location": "path", + "required": true, + "type": "string" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "includeDeleted": { + "default": "false", + "description": "Whether to return deleted comments. Deleted comments will not include their original content.", + "location": "query", + "type": "boolean" + } + }, + "path": "files/{fileId}/comments/{commentId}", + "response": { + "$ref": "Comment" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "description": "Lists a file's comments.", + "httpMethod": "GET", + "id": "drive.comments.list", + "parameterOrder": [ + "fileId" + ], + "parameters": { + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "includeDeleted": { + "default": "false", + "description": "Whether to include deleted comments. Deleted comments will not include their original content.", + "location": "query", + "type": "boolean" + }, + "pageSize": { + "default": "20", + "description": "The maximum number of comments to return per page.", + "format": "int32", + "location": "query", + "maximum": "100", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "The token for continuing a previous list request on the next page. 
This should be set to the value of 'nextPageToken' from the previous response.", + "location": "query", + "type": "string" + }, + "startModifiedTime": { + "description": "The minimum value of 'modifiedTime' for the result comments (RFC 3339 date-time).", + "location": "query", + "type": "string" + } + }, + "path": "files/{fileId}/comments", + "response": { + "$ref": "CommentList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "description": "Updates a comment with patch semantics.", + "httpMethod": "PATCH", + "id": "drive.comments.update", + "parameterOrder": [ + "fileId", + "commentId" + ], + "parameters": { + "commentId": { + "description": "The ID of the comment.", + "location": "path", + "required": true, + "type": "string" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "files/{fileId}/comments/{commentId}", + "request": { + "$ref": "Comment" + }, + "response": { + "$ref": "Comment" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + } } - }, - "parameterOrder": [ - "fileId", - "mimeType" - ], - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.readonly" - ], - "supportsMediaDownload": true }, - "generateIds": { - "id": "drive.files.generateIds", - "path": "files/generateIds", - "httpMethod": "GET", - "description": "Generates a set of file IDs which can be provided in create requests.", - "parameters": { - "count": { - "type": "integer", - "description": "The number of IDs to return.", - "default": "10", - "format": "int32", - "minimum": "1", - "maximum": "1000", - "location": "query" - }, - "space": { - "type": "string", - "description": "The space in which the IDs can be used to create new files. Supported values are 'drive' and 'appDataFolder'.", - "default": "drive", - "location": "query" + "files": { + "methods": { + "copy": { + "description": "Creates a copy of a file and applies any requested updates with patch semantics.", + "httpMethod": "POST", + "id": "drive.files.copy", + "parameterOrder": [ + "fileId" + ], + "parameters": { + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "ignoreDefaultVisibility": { + "default": "false", + "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.", + "location": "query", + "type": "boolean" + }, + "keepRevisionForever": { + "default": "false", + "description": "Whether to set the 'keepForever' field in the new head revision. 
This is only applicable to files with binary content in Drive.", + "location": "query", + "type": "boolean" + }, + "ocrLanguage": { + "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + "location": "query", + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + } + }, + "path": "files/{fileId}/copy", + "request": { + "$ref": "File" + }, + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.photos.readonly" + ] + }, + "create": { + "description": "Creates a new file.", + "httpMethod": "POST", + "id": "drive.files.create", + "mediaUpload": { + "accept": [ + "*/*" + ], + "maxSize": "5120GB", + "protocols": { + "resumable": { + "multipart": true, + "path": "/resumable/upload/drive/v3/files" + }, + "simple": { + "multipart": true, + "path": "/upload/drive/v3/files" + } + } + }, + "parameters": { + "ignoreDefaultVisibility": { + "default": "false", + "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.", + "location": "query", + "type": "boolean" + }, + "keepRevisionForever": { + "default": "false", + "description": "Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive.", + "location": "query", + "type": "boolean" + }, + "ocrLanguage": { + "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + "location": "query", + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + }, + "useContentAsIndexableText": { + "default": "false", + "description": "Whether to use the uploaded content as indexable text.", + "location": "query", + "type": "boolean" + } + }, + "path": "files", + "request": { + "$ref": "File" + }, + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ], + "supportsMediaUpload": true, + "supportsSubscription": true + }, + "delete": { + "description": "Permanently deletes a file owned by the user without moving it to the trash. If the file belongs to a Team Drive the user must be an organizer on the parent. 
If the target is a folder, all descendants owned by the user are also deleted.", + "httpMethod": "DELETE", + "id": "drive.files.delete", + "parameterOrder": [ + "fileId" + ], + "parameters": { + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + } + }, + "path": "files/{fileId}", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "emptyTrash": { + "description": "Permanently deletes all of the user's trashed files.", + "httpMethod": "DELETE", + "id": "drive.files.emptyTrash", + "path": "files/trash", + "scopes": [ + "https://www.googleapis.com/auth/drive" + ] + }, + "export": { + "description": "Exports a Google Doc to the requested MIME type and returns the exported content. Please note that the exported content is limited to 10MB.", + "httpMethod": "GET", + "id": "drive.files.export", + "parameterOrder": [ + "fileId", + "mimeType" + ], + "parameters": { + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "mimeType": { + "description": "The MIME type of the format requested for this export.", + "location": "query", + "required": true, + "type": "string" + } + }, + "path": "files/{fileId}/export", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true + }, + "generateIds": { + "description": "Generates a set of file IDs which can be provided in create requests.", + "httpMethod": "GET", + "id": "drive.files.generateIds", + "parameters": { + "count": { + "default": "10", + "description": "The number of IDs to return.", + "format": "int32", + "location": "query", + "maximum": "1000", + "minimum": "1", + "type": "integer" + }, + "space": { + "default": "drive", + "description": "The space in which the IDs can be used to create new files. Supported values are 'drive' and 'appDataFolder'.", + "location": "query", + "type": "string" + } + }, + "path": "files/generateIds", + "response": { + "$ref": "GeneratedIds" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "description": "Gets a file's metadata or content by ID.", + "httpMethod": "GET", + "id": "drive.files.get", + "parameterOrder": [ + "fileId" + ], + "parameters": { + "acknowledgeAbuse": { + "default": "false", + "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. 
This is only applicable when alt=media.", + "location": "query", + "type": "boolean" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + } + }, + "path": "files/{fileId}", + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true, + "supportsSubscription": true, + "useMediaDownloadService": true + }, + "list": { + "description": "Lists or searches files.", + "httpMethod": "GET", + "id": "drive.files.list", + "parameters": { + "corpora": { + "description": "Comma-separated list of bodies of items (files/documents) to which the query applies. Supported bodies are 'user', 'domain', 'teamDrive' and 'allTeamDrives'. 'allTeamDrives' must be combined with 'user'; all other values must be used in isolation. Prefer 'user' or 'teamDrive' to 'allTeamDrives' for efficiency.", + "location": "query", + "type": "string" + }, + "corpus": { + "description": "The source of files to list. Deprecated: use 'corpora' instead.", + "enum": [ + "domain", + "user" + ], + "enumDescriptions": [ + "Files shared to the user's domain.", + "Files owned by or shared to the user." + ], + "location": "query", + "type": "string" + }, + "includeTeamDriveItems": { + "default": "false", + "description": "Whether Team Drive items should be included in results.", + "location": "query", + "type": "boolean" + }, + "orderBy": { + "description": "A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'name_natural', 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one million files in which the requested sort order is ignored.", + "location": "query", + "type": "string" + }, + "pageSize": { + "default": "100", + "description": "The maximum number of files to return per page. Partial or empty result pages are possible even before the end of the files list has been reached.", + "format": "int32", + "location": "query", + "maximum": "1000", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", + "location": "query", + "type": "string" + }, + "q": { + "description": "A query for filtering the file results. See the \"Search for Files\" guide for supported syntax.", + "location": "query", + "type": "string" + }, + "spaces": { + "default": "drive", + "description": "A comma-separated list of spaces to query within the corpus. 
Supported values are 'drive', 'appDataFolder' and 'photos'.", + "location": "query", + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + }, + "teamDriveId": { + "description": "ID of Team Drive to search.", + "location": "query", + "type": "string" + } + }, + "path": "files", + "response": { + "$ref": "FileList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "description": "Updates a file's metadata and/or content with patch semantics.", + "httpMethod": "PATCH", + "id": "drive.files.update", + "mediaUpload": { + "accept": [ + "*/*" + ], + "maxSize": "5120GB", + "protocols": { + "resumable": { + "multipart": true, + "path": "/resumable/upload/drive/v3/files/{fileId}" + }, + "simple": { + "multipart": true, + "path": "/upload/drive/v3/files/{fileId}" + } + } + }, + "parameterOrder": [ + "fileId" + ], + "parameters": { + "addParents": { + "description": "A comma-separated list of parent IDs to add.", + "location": "query", + "type": "string" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "keepRevisionForever": { + "default": "false", + "description": "Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive.", + "location": "query", + "type": "boolean" + }, + "ocrLanguage": { + "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + "location": "query", + "type": "string" + }, + "removeParents": { + "description": "A comma-separated list of parent IDs to remove.", + "location": "query", + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + }, + "useContentAsIndexableText": { + "default": "false", + "description": "Whether to use the uploaded content as indexable text.", + "location": "query", + "type": "boolean" + } + }, + "path": "files/{fileId}", + "request": { + "$ref": "File" + }, + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.scripts" + ], + "supportsMediaUpload": true + }, + "watch": { + "description": "Subscribes to changes to a file", + "httpMethod": "POST", + "id": "drive.files.watch", + "parameterOrder": [ + "fileId" + ], + "parameters": { + "acknowledgeAbuse": { + "default": "false", + "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. 
This is only applicable when alt=media.", + "location": "query", + "type": "boolean" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + } + }, + "path": "files/{fileId}/watch", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true, + "supportsSubscription": true, + "useMediaDownloadService": true + } } - }, - "response": { - "$ref": "GeneratedIds" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file" - ] }, - "get": { - "id": "drive.files.get", - "path": "files/{fileId}", - "httpMethod": "GET", - "description": "Gets a file's metadata or content by ID.", - "parameters": { - "acknowledgeAbuse": { - "type": "boolean", - "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.", - "default": "false", - "location": "query" - }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "permissions": { + "methods": { + "create": { + "description": "Creates a permission for a file or Team Drive.", + "httpMethod": "POST", + "id": "drive.permissions.create", + "parameterOrder": [ + "fileId" + ], + "parameters": { + "emailMessage": { + "description": "A plain text custom message to include in the notification email.", + "location": "query", + "type": "string" + }, + "fileId": { + "description": "The ID of the file or Team Drive.", + "location": "path", + "required": true, + "type": "string" + }, + "sendNotificationEmail": { + "description": "Whether to send a notification email when sharing to users or groups. This defaults to true for users and groups, and is not allowed for other requests. It must not be disabled for ownership transfers.", + "location": "query", + "type": "boolean" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + }, + "transferOwnership": { + "default": "false", + "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. 
This parameter is required as an acknowledgement of the side effect.", + "location": "query", + "type": "boolean" + }, + "useDomainAdminAccess": { + "default": "false", + "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the item belongs.", + "location": "query", + "type": "boolean" + } + }, + "path": "files/{fileId}/permissions", + "request": { + "$ref": "Permission" + }, + "response": { + "$ref": "Permission" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "delete": { + "description": "Deletes a permission.", + "httpMethod": "DELETE", + "id": "drive.permissions.delete", + "parameterOrder": [ + "fileId", + "permissionId" + ], + "parameters": { + "fileId": { + "description": "The ID of the file or Team Drive.", + "location": "path", + "required": true, + "type": "string" + }, + "permissionId": { + "description": "The ID of the permission.", + "location": "path", + "required": true, + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + }, + "useDomainAdminAccess": { + "default": "false", + "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the item belongs.", + "location": "query", + "type": "boolean" + } + }, + "path": "files/{fileId}/permissions/{permissionId}", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "description": "Gets a permission by ID.", + "httpMethod": "GET", + "id": "drive.permissions.get", + "parameterOrder": [ + "fileId", + "permissionId" + ], + "parameters": { + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "permissionId": { + "description": "The ID of the permission.", + "location": "path", + "required": true, + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + }, + "useDomainAdminAccess": { + "default": "false", + "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the item belongs.", + "location": "query", + "type": "boolean" + } + }, + "path": "files/{fileId}/permissions/{permissionId}", + "response": { + "$ref": "Permission" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "description": "Lists a file's or Team Drive's permissions.", + "httpMethod": "GET", + "id": "drive.permissions.list", + "parameterOrder": [ + "fileId" + ], + "parameters": { + "fileId": { + "description": "The ID of the file or Team Drive.", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The maximum number of permissions to return per page. 
When not set for files in a Team Drive, at most 100 results will be returned. When not set for files that are not in a Team Drive, the entire list will be returned.", + "format": "int32", + "location": "query", + "maximum": "100", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", + "location": "query", + "type": "string" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + }, + "useDomainAdminAccess": { + "default": "false", + "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the item belongs.", + "location": "query", + "type": "boolean" + } + }, + "path": "files/{fileId}/permissions", + "response": { + "$ref": "PermissionList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "description": "Updates a permission with patch semantics.", + "httpMethod": "PATCH", + "id": "drive.permissions.update", + "parameterOrder": [ + "fileId", + "permissionId" + ], + "parameters": { + "fileId": { + "description": "The ID of the file or Team Drive.", + "location": "path", + "required": true, + "type": "string" + }, + "permissionId": { + "description": "The ID of the permission.", + "location": "path", + "required": true, + "type": "string" + }, + "removeExpiration": { + "default": "false", + "description": "Whether to remove the expiration date.", + "location": "query", + "type": "boolean" + }, + "supportsTeamDrives": { + "default": "false", + "description": "Whether the requesting application supports Team Drives.", + "location": "query", + "type": "boolean" + }, + "transferOwnership": { + "default": "false", + "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. 
This parameter is required as an acknowledgement of the side effect.", + "location": "query", + "type": "boolean" + }, + "useDomainAdminAccess": { + "default": "false", + "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the item belongs.", + "location": "query", + "type": "boolean" + } + }, + "path": "files/{fileId}/permissions/{permissionId}", + "request": { + "$ref": "Permission" + }, + "response": { + "$ref": "Permission" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + } } - }, - "parameterOrder": [ - "fileId" - ], - "response": { - "$ref": "File" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true, - "supportsSubscription": true }, - "list": { - "id": "drive.files.list", - "path": "files", - "httpMethod": "GET", - "description": "Lists or searches files.", - "parameters": { - "corpus": { - "type": "string", - "description": "The source of files to list.", - "default": "user", - "enum": [ - "domain", - "user" - ], - "enumDescriptions": [ - "Files shared to the user's domain.", - "Files owned by or shared to the user." - ], - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one million files in which the requested sort order is ignored.", - "location": "query" - }, - "pageSize": { - "type": "integer", - "description": "The maximum number of files to return per page.", - "default": "100", - "format": "int32", - "minimum": "1", - "maximum": "1000", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", - "location": "query" - }, - "q": { - "type": "string", - "description": "A query for filtering the file results. See the \"Search for Files\" guide for supported syntax.", - "location": "query" - }, - "spaces": { - "type": "string", - "description": "A comma-separated list of spaces to query within the corpus. 
Supported values are 'drive', 'appDataFolder' and 'photos'.", - "default": "drive", - "location": "query" + "replies": { + "methods": { + "create": { + "description": "Creates a new reply to a comment.", + "httpMethod": "POST", + "id": "drive.replies.create", + "parameterOrder": [ + "fileId", + "commentId" + ], + "parameters": { + "commentId": { + "description": "The ID of the comment.", + "location": "path", + "required": true, + "type": "string" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "files/{fileId}/comments/{commentId}/replies", + "request": { + "$ref": "Reply" + }, + "response": { + "$ref": "Reply" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "delete": { + "description": "Deletes a reply.", + "httpMethod": "DELETE", + "id": "drive.replies.delete", + "parameterOrder": [ + "fileId", + "commentId", + "replyId" + ], + "parameters": { + "commentId": { + "description": "The ID of the comment.", + "location": "path", + "required": true, + "type": "string" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "replyId": { + "description": "The ID of the reply.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "description": "Gets a reply by ID.", + "httpMethod": "GET", + "id": "drive.replies.get", + "parameterOrder": [ + "fileId", + "commentId", + "replyId" + ], + "parameters": { + "commentId": { + "description": "The ID of the comment.", + "location": "path", + "required": true, + "type": "string" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "includeDeleted": { + "default": "false", + "description": "Whether to return deleted replies. Deleted replies will not include their original content.", + "location": "query", + "type": "boolean" + }, + "replyId": { + "description": "The ID of the reply.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + "response": { + "$ref": "Reply" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "description": "Lists a comment's replies.", + "httpMethod": "GET", + "id": "drive.replies.list", + "parameterOrder": [ + "fileId", + "commentId" + ], + "parameters": { + "commentId": { + "description": "The ID of the comment.", + "location": "path", + "required": true, + "type": "string" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "includeDeleted": { + "default": "false", + "description": "Whether to include deleted replies. Deleted replies will not include their original content.", + "location": "query", + "type": "boolean" + }, + "pageSize": { + "default": "20", + "description": "The maximum number of replies to return per page.", + "format": "int32", + "location": "query", + "maximum": "100", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "The token for continuing a previous list request on the next page. 
This should be set to the value of 'nextPageToken' from the previous response.", + "location": "query", + "type": "string" + } + }, + "path": "files/{fileId}/comments/{commentId}/replies", + "response": { + "$ref": "ReplyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "description": "Updates a reply with patch semantics.", + "httpMethod": "PATCH", + "id": "drive.replies.update", + "parameterOrder": [ + "fileId", + "commentId", + "replyId" + ], + "parameters": { + "commentId": { + "description": "The ID of the comment.", + "location": "path", + "required": true, + "type": "string" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "replyId": { + "description": "The ID of the reply.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + "request": { + "$ref": "Reply" + }, + "response": { + "$ref": "Reply" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + } } - }, - "response": { - "$ref": "FileList" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ] }, - "update": { - "id": "drive.files.update", - "path": "files/{fileId}", - "httpMethod": "PATCH", - "description": "Updates a file's metadata and/or content with patch semantics.", - "parameters": { - "addParents": { - "type": "string", - "description": "A comma-separated list of parent IDs to add.", - "location": "query" - }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - }, - "keepRevisionForever": { - "type": "boolean", - "description": "Whether to set the 'keepForever' field in the new head revision. 
This is only applicable to files with binary content in Drive.", - "default": "false", - "location": "query" - }, - "ocrLanguage": { - "type": "string", - "description": "A language hint for OCR processing during image import (ISO 639-1 code).", - "location": "query" - }, - "removeParents": { - "type": "string", - "description": "A comma-separated list of parent IDs to remove.", - "location": "query" - }, - "useContentAsIndexableText": { - "type": "boolean", - "description": "Whether to use the uploaded content as indexable text.", - "default": "false", - "location": "query" - } - }, - "parameterOrder": [ - "fileId" - ], - "request": { - "$ref": "File" - }, - "response": { - "$ref": "File" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.scripts" - ], - "supportsMediaUpload": true, - "mediaUpload": { - "accept": [ - "*/*" - ], - "maxSize": "5120GB", - "protocols": { - "simple": { - "multipart": true, - "path": "/upload/drive/v3/files/{fileId}" - }, - "resumable": { - "multipart": true, - "path": "/resumable/upload/drive/v3/files/{fileId}" - } + "revisions": { + "methods": { + "delete": { + "description": "Permanently deletes a revision. This method is only applicable to files with binary content in Drive.", + "httpMethod": "DELETE", + "id": "drive.revisions.delete", + "parameterOrder": [ + "fileId", + "revisionId" + ], + "parameters": { + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "revisionId": { + "description": "The ID of the revision.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "files/{fileId}/revisions/{revisionId}", + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "description": "Gets a revision's metadata or content by ID.", + "httpMethod": "GET", + "id": "drive.revisions.get", + "parameterOrder": [ + "fileId", + "revisionId" + ], + "parameters": { + "acknowledgeAbuse": { + "default": "false", + "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. 
This is only applicable when alt=media.", + "location": "query", + "type": "boolean" + }, + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "revisionId": { + "description": "The ID of the revision.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "files/{fileId}/revisions/{revisionId}", + "response": { + "$ref": "Revision" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "list": { + "description": "Lists a file's revisions.", + "httpMethod": "GET", + "id": "drive.revisions.list", + "parameterOrder": [ + "fileId" + ], + "parameters": { + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "default": "200", + "description": "The maximum number of revisions to return per page.", + "format": "int32", + "location": "query", + "maximum": "1000", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", + "location": "query", + "type": "string" + } + }, + "path": "files/{fileId}/revisions", + "response": { + "$ref": "RevisionList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "description": "Updates a revision with patch semantics.", + "httpMethod": "PATCH", + "id": "drive.revisions.update", + "parameterOrder": [ + "fileId", + "revisionId" + ], + "parameters": { + "fileId": { + "description": "The ID of the file.", + "location": "path", + "required": true, + "type": "string" + }, + "revisionId": { + "description": "The ID of the revision.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "files/{fileId}/revisions/{revisionId}", + "request": { + "$ref": "Revision" + }, + "response": { + "$ref": "Revision" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + } } - } }, - "watch": { - "id": "drive.files.watch", - "path": "files/{fileId}/watch", - "httpMethod": "POST", - "description": "Subscribes to changes to a file", - "parameters": { - "acknowledgeAbuse": { - "type": "boolean", - "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. 
This is only applicable when alt=media.", - "default": "false", - "location": "query" - }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "teamdrives": { + "methods": { + "create": { + "description": "Creates a new Team Drive.", + "httpMethod": "POST", + "id": "drive.teamdrives.create", + "parameterOrder": [ + "requestId" + ], + "parameters": { + "requestId": { + "description": "An ID, such as a random UUID, which uniquely identifies this user's request for idempotent creation of a Team Drive. A repeated request by the same user and with the same request ID will avoid creating duplicates by attempting to create the same Team Drive. If the Team Drive already exists a 409 error will be returned.", + "location": "query", + "required": true, + "type": "string" + } + }, + "path": "teamdrives", + "request": { + "$ref": "TeamDrive" + }, + "response": { + "$ref": "TeamDrive" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive" + ] + }, + "delete": { + "description": "Permanently deletes a Team Drive for which the user is an organizer. The Team Drive cannot contain any untrashed items.", + "httpMethod": "DELETE", + "id": "drive.teamdrives.delete", + "parameterOrder": [ + "teamDriveId" + ], + "parameters": { + "teamDriveId": { + "description": "The ID of the Team Drive", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "teamdrives/{teamDriveId}", + "scopes": [ + "https://www.googleapis.com/auth/drive" + ] + }, + "get": { + "description": "Gets a Team Drive's metadata by ID.", + "httpMethod": "GET", + "id": "drive.teamdrives.get", + "parameterOrder": [ + "teamDriveId" + ], + "parameters": { + "teamDriveId": { + "description": "The ID of the Team Drive", + "location": "path", + "required": true, + "type": "string" + }, + "useDomainAdminAccess": { + "default": "false", + "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the Team Drive belongs.", + "location": "query", + "type": "boolean" + } + }, + "path": "teamdrives/{teamDriveId}", + "response": { + "$ref": "TeamDrive" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "description": "Lists the user's Team Drives.", + "httpMethod": "GET", + "id": "drive.teamdrives.list", + "parameters": { + "pageSize": { + "default": "10", + "description": "Maximum number of Team Drives to return.", + "format": "int32", + "location": "query", + "maximum": "100", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "Page token for Team Drives.", + "location": "query", + "type": "string" + }, + "q": { + "description": "Query string for searching Team Drives.", + "location": "query", + "type": "string" + }, + "useDomainAdminAccess": { + "default": "false", + "description": "Issue the request as a domain administrator; if set to true, then all Team Drives of the domain in which the requester is an administrator are returned.", + "location": "query", + "type": "boolean" + } + }, + "path": "teamdrives", + "response": { + "$ref": "TeamDriveList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "description": "Updates a Team Drive's metadata", + "httpMethod": "PATCH", + "id": "drive.teamdrives.update", + "parameterOrder": [ + "teamDriveId" + ], + 
"parameters": { + "teamDriveId": { + "description": "The ID of the Team Drive", + "location": "path", + "required": true, + "type": "string" + }, + "useDomainAdminAccess": { + "default": "false", + "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the Team Drive belongs.", + "location": "query", + "type": "boolean" + } + }, + "path": "teamdrives/{teamDriveId}", + "request": { + "$ref": "TeamDrive" + }, + "response": { + "$ref": "TeamDrive" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive" + ] + } } - }, - "parameterOrder": [ - "fileId" - ], - "request": { - "$ref": "Channel", - "parameterName": "resource" - }, - "response": { - "$ref": "Channel" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true, - "supportsSubscription": true } - } }, - "permissions": { - "methods": { - "create": { - "id": "drive.permissions.create", - "path": "files/{fileId}/permissions", - "httpMethod": "POST", - "description": "Creates a permission for a file.", - "parameters": { - "emailMessage": { - "type": "string", - "description": "A custom message to include in the notification email.", - "location": "query" - }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - }, - "sendNotificationEmail": { - "type": "boolean", - "description": "Whether to send a notification email when sharing to users or groups. This defaults to true for users and groups, and is not allowed for other requests. It must not be disabled for ownership transfers.", - "location": "query" + "revision": "20181213", + "rootUrl": "https://www.googleapis.com/", + "schemas": { + "About": { + "description": "Information about the user, the user's Drive, and system capabilities.", + "id": "About", + "properties": { + "appInstalled": { + "description": "Whether the user has installed the requesting app.", + "type": "boolean" + }, + "canCreateTeamDrives": { + "description": "Whether the user can create Team Drives.", + "type": "boolean" + }, + "exportFormats": { + "additionalProperties": { + "items": { + "type": "string" + }, + "type": "array" + }, + "description": "A map of source MIME type to possible targets for all supported exports.", + "type": "object" + }, + "folderColorPalette": { + "description": "The currently supported folder colors as RGB hex strings.", + "items": { + "type": "string" + }, + "type": "array" + }, + "importFormats": { + "additionalProperties": { + "items": { + "type": "string" + }, + "type": "array" + }, + "description": "A map of source MIME type to possible targets for all supported imports.", + "type": "object" + }, + "kind": { + "default": "drive#about", + "description": "Identifies what kind of resource this is. 
Value: the fixed string \"drive#about\".", + "type": "string" + }, + "maxImportSizes": { + "additionalProperties": { + "format": "int64", + "type": "string" + }, + "description": "A map of maximum import sizes by MIME type, in bytes.", + "type": "object" + }, + "maxUploadSize": { + "description": "The maximum upload size in bytes.", + "format": "int64", + "type": "string" + }, + "storageQuota": { + "description": "The user's storage quota limits and usage. All fields are measured in bytes.", + "properties": { + "limit": { + "description": "The usage limit, if applicable. This will not be present if the user has unlimited storage.", + "format": "int64", + "type": "string" + }, + "usage": { + "description": "The total usage across all services.", + "format": "int64", + "type": "string" + }, + "usageInDrive": { + "description": "The usage by all files in Google Drive.", + "format": "int64", + "type": "string" + }, + "usageInDriveTrash": { + "description": "The usage by trashed files in Google Drive.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "teamDriveThemes": { + "description": "A list of themes that are supported for Team Drives.", + "items": { + "properties": { + "backgroundImageLink": { + "description": "A link to this Team Drive theme's background image.", + "type": "string" + }, + "colorRgb": { + "description": "The color of this Team Drive theme as an RGB hex string.", + "type": "string" + }, + "id": { + "description": "The ID of the theme.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "user": { + "$ref": "User", + "description": "The authenticated user." + } }, - "transferOwnership": { - "type": "boolean", - "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. This parameter is required as an acknowledgement of the side effect.", - "default": "false", - "location": "query" - } - }, - "parameterOrder": [ - "fileId" - ], - "request": { - "$ref": "Permission" - }, - "response": { - "$ref": "Permission" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file" - ] + "type": "object" }, - "delete": { - "id": "drive.permissions.delete", - "path": "files/{fileId}/permissions/{permissionId}", - "httpMethod": "DELETE", - "description": "Deletes a permission.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "Change": { + "description": "A change to a file or Team Drive.", + "id": "Change", + "properties": { + "file": { + "$ref": "File", + "description": "The updated state of the file. Present if the type is file and the file has not been removed from this list of changes." + }, + "fileId": { + "description": "The ID of the file which has changed.", + "type": "string" + }, + "kind": { + "default": "drive#change", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#change\".", + "type": "string" + }, + "removed": { + "description": "Whether the file or Team Drive has been removed from this list of changes, for example by deletion or loss of access.", + "type": "boolean" + }, + "teamDrive": { + "$ref": "TeamDrive", + "description": "The updated state of the Team Drive. Present if the type is teamDrive, the user is still a member of the Team Drive, and the Team Drive has not been removed." 
+ }, + "teamDriveId": { + "description": "The ID of the Team Drive associated with this change.", + "type": "string" + }, + "time": { + "description": "The time of this change (RFC 3339 date-time).", + "format": "date-time", + "type": "string" + }, + "type": { + "description": "The type of the change. Possible values are file and teamDrive.", + "type": "string" + } }, - "permissionId": { - "type": "string", - "description": "The ID of the permission.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId", - "permissionId" - ], - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file" - ] - }, - "get": { - "id": "drive.permissions.get", - "path": "files/{fileId}/permissions/{permissionId}", - "httpMethod": "GET", - "description": "Gets a permission by ID.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "type": "object" + }, + "ChangeList": { + "description": "A list of changes for a user.", + "id": "ChangeList", + "properties": { + "changes": { + "description": "The list of changes. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched.", + "items": { + "$ref": "Change" + }, + "type": "array" + }, + "kind": { + "default": "drive#changeList", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#changeList\".", + "type": "string" + }, + "newStartPageToken": { + "description": "The starting page token for future changes. This will be present only if the end of the current changes list has been reached.", + "type": "string" + }, + "nextPageToken": { + "description": "The page token for the next page of changes. This will be absent if the end of the changes list has been reached. 
If the token is rejected for any reason, it should be discarded, and pagination should be restarted from the first page of results.", + "type": "string" + } }, - "permissionId": { - "type": "string", - "description": "The ID of the permission.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId", - "permissionId" - ], - "response": { - "$ref": "Permission" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ] - }, - "list": { - "id": "drive.permissions.list", - "path": "files/{fileId}/permissions", - "httpMethod": "GET", - "description": "Lists a file's permissions.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId" - ], - "response": { - "$ref": "PermissionList" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ] + "type": "object" }, - "update": { - "id": "drive.permissions.update", - "path": "files/{fileId}/permissions/{permissionId}", - "httpMethod": "PATCH", - "description": "Updates a permission with patch semantics.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - }, - "permissionId": { - "type": "string", - "description": "The ID of the permission.", - "required": true, - "location": "path" - }, - "transferOwnership": { - "type": "boolean", - "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. This parameter is required as an acknowledgement of the side effect.", - "default": "false", - "location": "query" - } - }, - "parameterOrder": [ - "fileId", - "permissionId" - ], - "request": { - "$ref": "Permission" - }, - "response": { - "$ref": "Permission" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file" - ] - } - } - }, - "replies": { - "methods": { - "create": { - "id": "drive.replies.create", - "path": "files/{fileId}/comments/{commentId}/replies", - "httpMethod": "POST", - "description": "Creates a new reply to a comment.", - "parameters": { - "commentId": { - "type": "string", - "description": "The ID of the comment.", - "required": true, - "location": "path" + "Channel": { + "description": "An notification channel used to watch for resource changes.", + "id": "Channel", + "properties": { + "address": { + "description": "The address where notifications are delivered for this channel.", + "type": "string" + }, + "expiration": { + "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", + "format": "int64", + "type": "string" + }, + "id": { + "description": "A UUID or similar unique string that identifies this channel.", + "type": "string" + }, + "kind": { + "default": "api#channel", + "description": "Identifies this as a notification channel used to watch for changes to a resource. 
Value: the fixed string \"api#channel\".", + "type": "string" + }, + "params": { + "additionalProperties": { + "description": "Declares a new parameter by name.", + "type": "string" + }, + "description": "Additional parameters controlling delivery channel behavior. Optional.", + "type": "object" + }, + "payload": { + "description": "A Boolean value to indicate whether payload is wanted. Optional.", + "type": "boolean" + }, + "resourceId": { + "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions.", + "type": "string" + }, + "resourceUri": { + "description": "A version-specific identifier for the watched resource.", + "type": "string" + }, + "token": { + "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional.", + "type": "string" + }, + "type": { + "description": "The type of delivery mechanism used for this channel.", + "type": "string" + } }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId", - "commentId" - ], - "request": { - "$ref": "Reply" - }, - "response": { - "$ref": "Reply" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file" - ] + "type": "object" }, - "delete": { - "id": "drive.replies.delete", - "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", - "httpMethod": "DELETE", - "description": "Deletes a reply.", - "parameters": { - "commentId": { - "type": "string", - "description": "The ID of the comment.", - "required": true, - "location": "path" + "Comment": { + "description": "A comment on a file.", + "id": "Comment", + "properties": { + "anchor": { + "description": "A region of the document represented as a JSON string. See anchor documentation for details on how to define and interpret anchor properties.", + "type": "string" + }, + "author": { + "$ref": "User", + "description": "The user who created the comment." + }, + "content": { + "annotations": { + "required": [ + "drive.comments.create", + "drive.comments.update" + ] + }, + "description": "The plain text content of the comment. This field is used for setting the content, while htmlContent should be displayed.", + "type": "string" + }, + "createdTime": { + "description": "The time at which the comment was created (RFC 3339 date-time).", + "format": "date-time", + "type": "string" + }, + "deleted": { + "description": "Whether the comment has been deleted. A deleted comment has no content.", + "type": "boolean" + }, + "htmlContent": { + "description": "The content of the comment with HTML formatting.", + "type": "string" + }, + "id": { + "description": "The ID of the comment.", + "type": "string" + }, + "kind": { + "default": "drive#comment", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#comment\".", + "type": "string" + }, + "modifiedTime": { + "description": "The last time the comment or any of its replies was modified (RFC 3339 date-time).", + "format": "date-time", + "type": "string" + }, + "quotedFileContent": { + "description": "The file content to which the comment refers, typically within the anchor region. 
For a text file, for example, this would be the text at the location of the comment.", + "properties": { + "mimeType": { + "description": "The MIME type of the quoted content.", + "type": "string" + }, + "value": { + "description": "The quoted content itself. This is interpreted as plain text if set through the API.", + "type": "string" + } + }, + "type": "object" + }, + "replies": { + "description": "The full list of replies to the comment in chronological order.", + "items": { + "$ref": "Reply" + }, + "type": "array" + }, + "resolved": { + "description": "Whether the comment has been resolved by one of its replies.", + "type": "boolean" + } }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "type": "object" + }, + "CommentList": { + "description": "A list of comments on a file.", + "id": "CommentList", + "properties": { + "comments": { + "description": "The list of comments. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched.", + "items": { + "$ref": "Comment" + }, + "type": "array" + }, + "kind": { + "default": "drive#commentList", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#commentList\".", + "type": "string" + }, + "nextPageToken": { + "description": "The page token for the next page of comments. This will be absent if the end of the comments list has been reached. If the token is rejected for any reason, it should be discarded, and pagination should be restarted from the first page of results.", + "type": "string" + } }, - "replyId": { - "type": "string", - "description": "The ID of the reply.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId", - "commentId", - "replyId" - ], - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file" - ] - }, - "get": { - "id": "drive.replies.get", - "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", - "httpMethod": "GET", - "description": "Gets a reply by ID.", - "parameters": { - "commentId": { - "type": "string", - "description": "The ID of the comment.", - "required": true, - "location": "path" + "type": "object" + }, + "File": { + "description": "The metadata for a file.", + "id": "File", + "properties": { + "appProperties": { + "additionalProperties": { + "type": "string" + }, + "description": "A collection of arbitrary key-value pairs which are private to the requesting app.\nEntries with null values are cleared in update and copy requests.", + "type": "object" + }, + "capabilities": { + "description": "Capabilities the current user has on this file. Each capability corresponds to a fine-grained action that a user may take.", + "properties": { + "canAddChildren": { + "description": "Whether the current user can add children to this folder. This is always false when the item is not a folder.", + "type": "boolean" + }, + "canChangeCopyRequiresWriterPermission": { + "description": "Whether the current user can change the copyRequiresWriterPermission restriction of this file.", + "type": "boolean" + }, + "canChangeViewersCanCopyContent": { + "description": "Deprecated", + "type": "boolean" + }, + "canComment": { + "description": "Whether the current user can comment on this file.", + "type": "boolean" + }, + "canCopy": { + "description": "Whether the current user can copy this file. 
For a Team Drive item, whether the current user can copy non-folder descendants of this item, or this item itself if it is not a folder.", + "type": "boolean" + }, + "canDelete": { + "description": "Whether the current user can delete this file.", + "type": "boolean" + }, + "canDeleteChildren": { + "description": "Whether the current user can delete children of this folder. This is false when the item is not a folder. Only populated for Team Drive items.", + "type": "boolean" + }, + "canDownload": { + "description": "Whether the current user can download this file.", + "type": "boolean" + }, + "canEdit": { + "description": "Whether the current user can edit this file.", + "type": "boolean" + }, + "canListChildren": { + "description": "Whether the current user can list the children of this folder. This is always false when the item is not a folder.", + "type": "boolean" + }, + "canMoveChildrenOutOfTeamDrive": { + "description": "Whether the current user can move children of this folder outside of the Team Drive. This is false when the item is not a folder. Only populated for Team Drive items.", + "type": "boolean" + }, + "canMoveChildrenWithinTeamDrive": { + "description": "Whether the current user can move children of this folder within the Team Drive. This is false when the item is not a folder. Only populated for Team Drive items.", + "type": "boolean" + }, + "canMoveItemIntoTeamDrive": { + "description": "Whether the current user can move this item into a Team Drive. If the item is in a Team Drive, this field is equivalent to canMoveTeamDriveItem.", + "type": "boolean" + }, + "canMoveItemOutOfTeamDrive": { + "description": "Whether the current user can move this Team Drive item outside of this Team Drive by changing its parent. Note that a request to change the parent of the item may still fail depending on the new parent that is being added. Only populated for Team Drive items.", + "type": "boolean" + }, + "canMoveItemWithinTeamDrive": { + "description": "Whether the current user can move this Team Drive item within this Team Drive. Note that a request to change the parent of the item may still fail depending on the new parent that is being added. Only populated for Team Drive items.", + "type": "boolean" + }, + "canMoveTeamDriveItem": { + "description": "Deprecated - use canMoveItemWithinTeamDrive or canMoveItemOutOfTeamDrive instead.", + "type": "boolean" + }, + "canReadRevisions": { + "description": "Whether the current user can read the revisions resource of this file. For a Team Drive item, whether revisions of non-folder descendants of this item, or this item itself if it is not a folder, can be read.", + "type": "boolean" + }, + "canReadTeamDrive": { + "description": "Whether the current user can read the Team Drive to which this file belongs. Only populated for Team Drive files.", + "type": "boolean" + }, + "canRemoveChildren": { + "description": "Whether the current user can remove children from this folder. This is always false when the item is not a folder. 
For Team Drive items, use canDeleteChildren or canTrashChildren instead.", + "type": "boolean" + }, + "canRename": { + "description": "Whether the current user can rename this file.", + "type": "boolean" + }, + "canShare": { + "description": "Whether the current user can modify the sharing settings for this file.", + "type": "boolean" + }, + "canTrash": { + "description": "Whether the current user can move this file to trash.", + "type": "boolean" + }, + "canTrashChildren": { + "description": "Whether the current user can trash children of this folder. This is false when the item is not a folder. Only populated for Team Drive items.", + "type": "boolean" + }, + "canUntrash": { + "description": "Whether the current user can restore this file from trash.", + "type": "boolean" + } + }, + "type": "object" + }, + "contentHints": { + "description": "Additional information about the content of the file. These fields are never populated in responses.", + "properties": { + "indexableText": { + "description": "Text to be indexed for the file to improve fullText queries. This is limited to 128KB in length and may contain HTML elements.", + "type": "string" + }, + "thumbnail": { + "description": "A thumbnail for the file. This will only be used if Drive cannot generate a standard thumbnail.", + "properties": { + "image": { + "description": "The thumbnail data encoded with URL-safe Base64 (RFC 4648 section 5).", + "format": "byte", + "type": "string" + }, + "mimeType": { + "description": "The MIME type of the thumbnail.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "copyRequiresWriterPermission": { + "description": "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.", + "type": "boolean" + }, + "createdTime": { + "description": "The time at which the file was created (RFC 3339 date-time).", + "format": "date-time", + "type": "string" + }, + "description": { + "description": "A short description of the file.", + "type": "string" + }, + "explicitlyTrashed": { + "description": "Whether the file has been explicitly trashed, as opposed to recursively trashed from a parent folder.", + "type": "boolean" + }, + "exportLinks": { + "additionalProperties": { + "description": "A mapping from export format to URL", + "type": "string" + }, + "description": "Links for exporting Google Docs to specific formats.", + "type": "object" + }, + "fileExtension": { + "description": "The final component of fullFileExtension. This is only available for files with binary content in Drive.", + "type": "string" + }, + "folderColorRgb": { + "description": "The color for a folder as an RGB hex string. The supported colors are published in the folderColorPalette field of the About resource.\nIf an unsupported color is specified, the closest color in the palette will be used instead.", + "type": "string" + }, + "fullFileExtension": { + "description": "The full file extension extracted from the name field. May contain multiple concatenated extensions, such as \"tar.gz\". This is only available for files with binary content in Drive.\nThis is automatically updated when the name field changes, however it is not cleared if the new name does not contain a valid extension.", + "type": "string" + }, + "hasAugmentedPermissions": { + "description": "Whether any users are granted file access directly on this file. 
This field is only populated for Team Drive files.", + "type": "boolean" + }, + "hasThumbnail": { + "description": "Whether this file has a thumbnail. This does not indicate whether the requesting app has access to the thumbnail. To check access, look for the presence of the thumbnailLink field.", + "type": "boolean" + }, + "headRevisionId": { + "description": "The ID of the file's head revision. This is currently only available for files with binary content in Drive.", + "type": "string" + }, + "iconLink": { + "description": "A static, unauthenticated link to the file's icon.", + "type": "string" + }, + "id": { + "description": "The ID of the file.", + "type": "string" + }, + "imageMediaMetadata": { + "description": "Additional metadata about image media, if available.", + "properties": { + "aperture": { + "description": "The aperture used to create the photo (f-number).", + "format": "float", + "type": "number" + }, + "cameraMake": { + "description": "The make of the camera used to create the photo.", + "type": "string" + }, + "cameraModel": { + "description": "The model of the camera used to create the photo.", + "type": "string" + }, + "colorSpace": { + "description": "The color space of the photo.", + "type": "string" + }, + "exposureBias": { + "description": "The exposure bias of the photo (APEX value).", + "format": "float", + "type": "number" + }, + "exposureMode": { + "description": "The exposure mode used to create the photo.", + "type": "string" + }, + "exposureTime": { + "description": "The length of the exposure, in seconds.", + "format": "float", + "type": "number" + }, + "flashUsed": { + "description": "Whether a flash was used to create the photo.", + "type": "boolean" + }, + "focalLength": { + "description": "The focal length used to create the photo, in millimeters.", + "format": "float", + "type": "number" + }, + "height": { + "description": "The height of the image in pixels.", + "format": "int32", + "type": "integer" + }, + "isoSpeed": { + "description": "The ISO speed used to create the photo.", + "format": "int32", + "type": "integer" + }, + "lens": { + "description": "The lens used to create the photo.", + "type": "string" + }, + "location": { + "description": "Geographic location information stored in the image.", + "properties": { + "altitude": { + "description": "The altitude stored in the image.", + "format": "double", + "type": "number" + }, + "latitude": { + "description": "The latitude stored in the image.", + "format": "double", + "type": "number" + }, + "longitude": { + "description": "The longitude stored in the image.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, + "maxApertureValue": { + "description": "The smallest f-number of the lens at the focal length used to create the photo (APEX value).", + "format": "float", + "type": "number" + }, + "meteringMode": { + "description": "The metering mode used to create the photo.", + "type": "string" + }, + "rotation": { + "description": "The rotation in clockwise degrees from the image's original orientation.", + "format": "int32", + "type": "integer" + }, + "sensor": { + "description": "The type of sensor used to create the photo.", + "type": "string" + }, + "subjectDistance": { + "description": "The distance to the subject of the photo, in meters.", + "format": "int32", + "type": "integer" + }, + "time": { + "description": "The date and time the photo was taken (EXIF DateTime).", + "type": "string" + }, + "whiteBalance": { + "description": "The white balance mode used to create 
the photo.", + "type": "string" + }, + "width": { + "description": "The width of the image in pixels.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "isAppAuthorized": { + "description": "Whether the file was created or opened by the requesting app.", + "type": "boolean" + }, + "kind": { + "default": "drive#file", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#file\".", + "type": "string" + }, + "lastModifyingUser": { + "$ref": "User", + "description": "The last user to modify the file." + }, + "md5Checksum": { + "description": "The MD5 checksum for the content of the file. This is only applicable to files with binary content in Drive.", + "type": "string" + }, + "mimeType": { + "description": "The MIME type of the file.\nDrive will attempt to automatically detect an appropriate value from uploaded content if no value is provided. The value cannot be changed unless a new revision is uploaded.\nIf a file is created with a Google Doc MIME type, the uploaded content will be imported if possible. The supported import formats are published in the About resource.", + "type": "string" + }, + "modifiedByMe": { + "description": "Whether the file has been modified by this user.", + "type": "boolean" + }, + "modifiedByMeTime": { + "description": "The last time the file was modified by the user (RFC 3339 date-time).", + "format": "date-time", + "type": "string" + }, + "modifiedTime": { + "description": "The last time the file was modified by anyone (RFC 3339 date-time).\nNote that setting modifiedTime will also update modifiedByMeTime for the user.", + "format": "date-time", + "type": "string" + }, + "name": { + "description": "The name of the file. This is not necessarily unique within a folder. Note that for immutable items such as the top level folders of Team Drives, My Drive root folder, and Application Data folder the name is constant.", + "type": "string" + }, + "originalFilename": { + "description": "The original filename of the uploaded content if available, or else the original value of the name field. This is only available for files with binary content in Drive.", + "type": "string" + }, + "ownedByMe": { + "description": "Whether the user owns the file. Not populated for Team Drive files.", + "type": "boolean" + }, + "owners": { + "description": "The owners of the file. Currently, only certain legacy files may have more than one owner. Not populated for Team Drive files.", + "items": { + "$ref": "User" + }, + "type": "array" + }, + "parents": { + "description": "The IDs of the parent folders which contain the file.\nIf not specified as part of a create request, the file will be placed directly in the user's My Drive folder. If not specified as part of a copy request, the file will inherit any discoverable parents of the source file. Update requests must use the addParents and removeParents parameters to modify the parents list.", + "items": { + "type": "string" + }, + "type": "array" + }, + "permissionIds": { + "description": "List of permission IDs for users with access to this file.", + "items": { + "type": "string" + }, + "type": "array" + }, + "permissions": { + "description": "The full list of permissions for the file. This is only available if the requesting user can share the file. 
Not populated for Team Drive files.", + "items": { + "$ref": "Permission" + }, + "type": "array" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "A collection of arbitrary key-value pairs which are visible to all apps.\nEntries with null values are cleared in update and copy requests.", + "type": "object" + }, + "quotaBytesUsed": { + "description": "The number of storage quota bytes used by the file. This includes the head revision as well as previous revisions with keepForever enabled.", + "format": "int64", + "type": "string" + }, + "shared": { + "description": "Whether the file has been shared. Not populated for Team Drive files.", + "type": "boolean" + }, + "sharedWithMeTime": { + "description": "The time at which the file was shared with the user, if applicable (RFC 3339 date-time).", + "format": "date-time", + "type": "string" + }, + "sharingUser": { + "$ref": "User", + "description": "The user who shared the file with the requesting user, if applicable." + }, + "size": { + "description": "The size of the file's content in bytes. This is only applicable to files with binary content in Drive.", + "format": "int64", + "type": "string" + }, + "spaces": { + "description": "The list of spaces which contain the file. The currently supported values are 'drive', 'appDataFolder' and 'photos'.", + "items": { + "type": "string" + }, + "type": "array" + }, + "starred": { + "description": "Whether the user has starred the file.", + "type": "boolean" + }, + "teamDriveId": { + "description": "ID of the Team Drive the file resides in.", + "type": "string" + }, + "thumbnailLink": { + "description": "A short-lived link to the file's thumbnail, if available. Typically lasts on the order of hours. Only populated when the requesting app can access the file's content.", + "type": "string" + }, + "thumbnailVersion": { + "description": "The thumbnail version for use in thumbnail cache invalidation.", + "format": "int64", + "type": "string" + }, + "trashed": { + "description": "Whether the file has been trashed, either explicitly or from a trashed parent folder. Only the owner may trash a file, and other users cannot see files in the owner's trash.", + "type": "boolean" + }, + "trashedTime": { + "description": "The time that the item was trashed (RFC 3339 date-time). Only populated for Team Drive files.", + "format": "date-time", + "type": "string" + }, + "trashingUser": { + "$ref": "User", + "description": "If the file has been explicitly trashed, the user who trashed it. Only populated for Team Drive files." + }, + "version": { + "description": "A monotonically increasing version number for the file. This reflects every change made to the file on the server, even those not visible to the user.", + "format": "int64", + "type": "string" + }, + "videoMediaMetadata": { + "description": "Additional metadata about video media. 
This may not be available immediately upon upload.", + "properties": { + "durationMillis": { + "description": "The duration of the video in milliseconds.", + "format": "int64", + "type": "string" + }, + "height": { + "description": "The height of the video in pixels.", + "format": "int32", + "type": "integer" + }, + "width": { + "description": "The width of the video in pixels.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "viewedByMe": { + "description": "Whether the file has been viewed by this user.", + "type": "boolean" + }, + "viewedByMeTime": { + "description": "The last time the file was viewed by the user (RFC 3339 date-time).", + "format": "date-time", + "type": "string" + }, + "viewersCanCopyContent": { + "description": "Deprecated - use copyRequiresWriterPermission instead.", + "type": "boolean" + }, + "webContentLink": { + "description": "A link for downloading the content of the file in a browser. This is only available for files with binary content in Drive.", + "type": "string" + }, + "webViewLink": { + "description": "A link for opening the file in a relevant Google editor or viewer in a browser.", + "type": "string" + }, + "writersCanShare": { + "description": "Whether users with only writer permission can modify the file's permissions. Not populated for Team Drive files.", + "type": "boolean" + } }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "type": "object" + }, + "FileList": { + "description": "A list of files.", + "id": "FileList", + "properties": { + "files": { + "description": "The list of files. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched.", + "items": { + "$ref": "File" + }, + "type": "array" + }, + "incompleteSearch": { + "description": "Whether the search process was incomplete. If true, then some search results may be missing, since all documents were not searched. This may occur when searching multiple Team Drives with the \"user,allTeamDrives\" corpora, but all corpora could not be searched. When this happens, it is suggested that clients narrow their query by choosing a different corpus such as \"user\" or \"teamDrive\".", + "type": "boolean" + }, + "kind": { + "default": "drive#fileList", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#fileList\".", + "type": "string" + }, + "nextPageToken": { + "description": "The page token for the next page of files. This will be absent if the end of the files list has been reached. If the token is rejected for any reason, it should be discarded, and pagination should be restarted from the first page of results.", + "type": "string" + } }, - "includeDeleted": { - "type": "boolean", - "description": "Whether to return deleted replies. Deleted replies will not include their original content.", - "default": "false", - "location": "query" + "type": "object" + }, + "GeneratedIds": { + "description": "A list of generated file IDs which can be provided in create requests.", + "id": "GeneratedIds", + "properties": { + "ids": { + "description": "The IDs generated for the requesting user in the specified space.", + "items": { + "type": "string" + }, + "type": "array" + }, + "kind": { + "default": "drive#generatedIds", + "description": "Identifies what kind of resource this is. 
Value: the fixed string \"drive#generatedIds\".", + "type": "string" + }, + "space": { + "description": "The type of file that can be created with these IDs.", + "type": "string" + } }, - "replyId": { - "type": "string", - "description": "The ID of the reply.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId", - "commentId", - "replyId" - ], - "response": { - "$ref": "Reply" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.readonly" - ] + "type": "object" }, - "list": { - "id": "drive.replies.list", - "path": "files/{fileId}/comments/{commentId}/replies", - "httpMethod": "GET", - "description": "Lists a comment's replies.", - "parameters": { - "commentId": { - "type": "string", - "description": "The ID of the comment.", - "required": true, - "location": "path" - }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "Permission": { + "description": "A permission for a file. A permission grants a user, group, domain or the world access to a file or a folder hierarchy.", + "id": "Permission", + "properties": { + "allowFileDiscovery": { + "description": "Whether the permission allows the file to be discovered through search. This is only applicable for permissions of type domain or anyone.", + "type": "boolean" + }, + "deleted": { + "description": "Whether the account associated with this permission has been deleted. This field only pertains to user and group permissions.", + "type": "boolean" + }, + "displayName": { + "description": "A displayable name for users, groups or domains.", + "type": "string" + }, + "domain": { + "description": "The domain to which this permission refers.", + "type": "string" + }, + "emailAddress": { + "description": "The email address of the user or group to which this permission refers.", + "type": "string" + }, + "expirationTime": { + "description": "The time at which this permission will expire (RFC 3339 date-time). Expiration times have the following restrictions: \n- They can only be set on user and group permissions \n- The time must be in the future \n- The time cannot be more than a year in the future", + "format": "date-time", + "type": "string" + }, + "id": { + "description": "The ID of this permission. This is a unique identifier for the grantee, and is published in User resources as permissionId.", + "type": "string" + }, + "kind": { + "default": "drive#permission", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#permission\".", + "type": "string" + }, + "photoLink": { + "description": "A link to the user's profile photo, if available.", + "type": "string" + }, + "role": { + "annotations": { + "required": [ + "drive.permissions.create" + ] + }, + "description": "The role granted by this permission. While new values may be supported in the future, the following are currently allowed: \n- owner \n- organizer \n- fileOrganizer \n- writer \n- commenter \n- reader", + "type": "string" + }, + "teamDrivePermissionDetails": { + "description": "Details of whether the permissions on this Team Drive item are inherited or directly on this item. This is an output-only field which is present only for Team Drive items.", + "items": { + "properties": { + "inherited": { + "description": "Whether this permission is inherited. This field is always populated. 
This is an output-only field.", + "type": "boolean" + }, + "inheritedFrom": { + "description": "The ID of the item from which this permission is inherited. This is an output-only field and is only populated for members of the Team Drive.", + "type": "string" + }, + "role": { + "description": "The primary role for this user. While new values may be added in the future, the following are currently possible: \n- organizer \n- fileOrganizer \n- writer \n- commenter \n- reader", + "type": "string" + }, + "teamDrivePermissionType": { + "description": "The Team Drive permission type for this user. While new values may be added in future, the following are currently possible: \n- file \n- member", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": { + "annotations": { + "required": [ + "drive.permissions.create" + ] + }, + "description": "The type of the grantee. Valid values are: \n- user \n- group \n- domain \n- anyone", + "type": "string" + } }, - "includeDeleted": { - "type": "boolean", - "description": "Whether to include deleted replies. Deleted replies will not include their original content.", - "default": "false", - "location": "query" + "type": "object" + }, + "PermissionList": { + "description": "A list of permissions for a file.", + "id": "PermissionList", + "properties": { + "kind": { + "default": "drive#permissionList", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#permissionList\".", + "type": "string" + }, + "nextPageToken": { + "description": "The page token for the next page of permissions. This field will be absent if the end of the permissions list has been reached. If the token is rejected for any reason, it should be discarded, and pagination should be restarted from the first page of results.", + "type": "string" + }, + "permissions": { + "description": "The list of permissions. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched.", + "items": { + "$ref": "Permission" + }, + "type": "array" + } }, - "pageSize": { - "type": "integer", - "description": "The maximum number of replies to return per page.", - "default": "20", - "format": "int32", - "minimum": "1", - "maximum": "100", - "location": "query" + "type": "object" + }, + "Reply": { + "description": "A reply to a comment on a file.", + "id": "Reply", + "properties": { + "action": { + "description": "The action the reply performed to the parent comment. Valid values are: \n- resolve \n- reopen", + "type": "string" + }, + "author": { + "$ref": "User", + "description": "The user who created the reply." + }, + "content": { + "annotations": { + "required": [ + "drive.replies.update" + ] + }, + "description": "The plain text content of the reply. This field is used for setting the content, while htmlContent should be displayed. This is required on creates if no action is specified.", + "type": "string" + }, + "createdTime": { + "description": "The time at which the reply was created (RFC 3339 date-time).", + "format": "date-time", + "type": "string" + }, + "deleted": { + "description": "Whether the reply has been deleted. A deleted reply has no content.", + "type": "boolean" + }, + "htmlContent": { + "description": "The content of the reply with HTML formatting.", + "type": "string" + }, + "id": { + "description": "The ID of the reply.", + "type": "string" + }, + "kind": { + "default": "drive#reply", + "description": "Identifies what kind of resource this is. 
Value: the fixed string \"drive#reply\".", + "type": "string" + }, + "modifiedTime": { + "description": "The last time the reply was modified (RFC 3339 date-time).", + "format": "date-time", + "type": "string" + } }, - "pageToken": { - "type": "string", - "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", - "location": "query" - } - }, - "parameterOrder": [ - "fileId", - "commentId" - ], - "response": { - "$ref": "ReplyList" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.readonly" - ] + "type": "object" }, - "update": { - "id": "drive.replies.update", - "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", - "httpMethod": "PATCH", - "description": "Updates a reply with patch semantics.", - "parameters": { - "commentId": { - "type": "string", - "description": "The ID of the comment.", - "required": true, - "location": "path" + "ReplyList": { + "description": "A list of replies to a comment on a file.", + "id": "ReplyList", + "properties": { + "kind": { + "default": "drive#replyList", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#replyList\".", + "type": "string" + }, + "nextPageToken": { + "description": "The page token for the next page of replies. This will be absent if the end of the replies list has been reached. If the token is rejected for any reason, it should be discarded, and pagination should be restarted from the first page of results.", + "type": "string" + }, + "replies": { + "description": "The list of replies. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched.", + "items": { + "$ref": "Reply" + }, + "type": "array" + } }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "type": "object" + }, + "Revision": { + "description": "The metadata for a revision to a file.", + "id": "Revision", + "properties": { + "exportLinks": { + "additionalProperties": { + "description": "A mapping from export format to URL", + "type": "string" + }, + "description": "Links for exporting Google Docs to specific formats.", + "type": "object" + }, + "id": { + "description": "The ID of the revision.", + "type": "string" + }, + "keepForever": { + "description": "Whether to keep this revision forever, even if it is no longer the head revision. If not set, the revision will be automatically purged 30 days after newer content is uploaded. This can be set on a maximum of 200 revisions for a file.\nThis field is only applicable to files with binary content in Drive.", + "type": "boolean" + }, + "kind": { + "default": "drive#revision", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#revision\".", + "type": "string" + }, + "lastModifyingUser": { + "$ref": "User", + "description": "The last user to modify this revision." + }, + "md5Checksum": { + "description": "The MD5 checksum of the revision's content. 
This is only applicable to files with binary content in Drive.", + "type": "string" + }, + "mimeType": { + "description": "The MIME type of the revision.", + "type": "string" + }, + "modifiedTime": { + "description": "The last time the revision was modified (RFC 3339 date-time).", + "format": "date-time", + "type": "string" + }, + "originalFilename": { + "description": "The original filename used to create this revision. This is only applicable to files with binary content in Drive.", + "type": "string" + }, + "publishAuto": { + "description": "Whether subsequent revisions will be automatically republished. This is only applicable to Google Docs.", + "type": "boolean" + }, + "published": { + "description": "Whether this revision is published. This is only applicable to Google Docs.", + "type": "boolean" + }, + "publishedOutsideDomain": { + "description": "Whether this revision is published outside the domain. This is only applicable to Google Docs.", + "type": "boolean" + }, + "size": { + "description": "The size of the revision's content in bytes. This is only applicable to files with binary content in Drive.", + "format": "int64", + "type": "string" + } }, - "replyId": { - "type": "string", - "description": "The ID of the reply.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId", - "commentId", - "replyId" - ], - "request": { - "$ref": "Reply" - }, - "response": { - "$ref": "Reply" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.file" - ] - } - } - }, - "revisions": { - "methods": { - "delete": { - "id": "drive.revisions.delete", - "path": "files/{fileId}/revisions/{revisionId}", - "httpMethod": "DELETE", - "description": "Permanently deletes a revision. This method is only applicable to files with binary content in Drive.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "type": "object" + }, + "RevisionList": { + "description": "A list of revisions of a file.", + "id": "RevisionList", + "properties": { + "kind": { + "default": "drive#revisionList", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#revisionList\".", + "type": "string" + }, + "nextPageToken": { + "description": "The page token for the next page of revisions. This will be absent if the end of the revisions list has been reached. If the token is rejected for any reason, it should be discarded, and pagination should be restarted from the first page of results.", + "type": "string" + }, + "revisions": { + "description": "The list of revisions. 
If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched.", + "items": { + "$ref": "Revision" + }, + "type": "array" + } }, - "revisionId": { - "type": "string", - "description": "The ID of the revision.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId", - "revisionId" - ], - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file" - ] + "type": "object" }, - "get": { - "id": "drive.revisions.get", - "path": "files/{fileId}/revisions/{revisionId}", - "httpMethod": "GET", - "description": "Gets a revision's metadata or content by ID.", - "parameters": { - "acknowledgeAbuse": { - "type": "boolean", - "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.", - "default": "false", - "location": "query" + "StartPageToken": { + "id": "StartPageToken", + "properties": { + "kind": { + "default": "drive#startPageToken", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#startPageToken\".", + "type": "string" + }, + "startPageToken": { + "description": "The starting page token for listing changes.", + "type": "string" + } }, - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "type": "object" + }, + "TeamDrive": { + "description": "Representation of a Team Drive.", + "id": "TeamDrive", + "properties": { + "backgroundImageFile": { + "description": "An image file and cropping parameters from which a background image for this Team Drive is set. This is a write only field; it can only be set on drive.teamdrives.update requests that don't set themeId. When specified, all fields of the backgroundImageFile must be set.", + "properties": { + "id": { + "description": "The ID of an image file in Drive to use for the background image.", + "type": "string" + }, + "width": { + "description": "The width of the cropped image in the closed range of 0 to 1. This value represents the width of the cropped image divided by the width of the entire image. The height is computed by applying a width to height aspect ratio of 80 to 9. The resulting image must be at least 1280 pixels wide and 144 pixels high.", + "format": "float", + "type": "number" + }, + "xCoordinate": { + "description": "The X coordinate of the upper left corner of the cropping area in the background image. This is a value in the closed range of 0 to 1. This value represents the horizontal distance from the left side of the entire image to the left side of the cropping area divided by the width of the entire image.", + "format": "float", + "type": "number" + }, + "yCoordinate": { + "description": "The Y coordinate of the upper left corner of the cropping area in the background image. This is a value in the closed range of 0 to 1. 
This value represents the vertical distance from the top side of the entire image to the top side of the cropping area divided by the height of the entire image.", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, + "backgroundImageLink": { + "description": "A short-lived link to this Team Drive's background image.", + "type": "string" + }, + "capabilities": { + "description": "Capabilities the current user has on this Team Drive.", + "properties": { + "canAddChildren": { + "description": "Whether the current user can add children to folders in this Team Drive.", + "type": "boolean" + }, + "canChangeCopyRequiresWriterPermissionRestriction": { + "description": "Whether the current user can change the copyRequiresWriterPermission restriction of this Team Drive.", + "type": "boolean" + }, + "canChangeDomainUsersOnlyRestriction": { + "description": "Whether the current user can change the domainUsersOnly restriction of this Team Drive.", + "type": "boolean" + }, + "canChangeTeamDriveBackground": { + "description": "Whether the current user can change the background of this Team Drive.", + "type": "boolean" + }, + "canChangeTeamMembersOnlyRestriction": { + "description": "Whether the current user can change the teamMembersOnly restriction of this Team Drive.", + "type": "boolean" + }, + "canComment": { + "description": "Whether the current user can comment on files in this Team Drive.", + "type": "boolean" + }, + "canCopy": { + "description": "Whether the current user can copy files in this Team Drive.", + "type": "boolean" + }, + "canDeleteChildren": { + "description": "Whether the current user can delete children from folders in this Team Drive.", + "type": "boolean" + }, + "canDeleteTeamDrive": { + "description": "Whether the current user can delete this Team Drive. Attempting to delete the Team Drive may still fail if there are untrashed items inside the Team Drive.", + "type": "boolean" + }, + "canDownload": { + "description": "Whether the current user can download files in this Team Drive.", + "type": "boolean" + }, + "canEdit": { + "description": "Whether the current user can edit files in this Team Drive", + "type": "boolean" + }, + "canListChildren": { + "description": "Whether the current user can list the children of folders in this Team Drive.", + "type": "boolean" + }, + "canManageMembers": { + "description": "Whether the current user can add members to this Team Drive or remove them or change their role.", + "type": "boolean" + }, + "canReadRevisions": { + "description": "Whether the current user can read the revisions resource of files in this Team Drive.", + "type": "boolean" + }, + "canRemoveChildren": { + "description": "Deprecated - use canDeleteChildren or canTrashChildren instead.", + "type": "boolean" + }, + "canRename": { + "description": "Whether the current user can rename files or folders in this Team Drive.", + "type": "boolean" + }, + "canRenameTeamDrive": { + "description": "Whether the current user can rename this Team Drive.", + "type": "boolean" + }, + "canShare": { + "description": "Whether the current user can share files or folders in this Team Drive.", + "type": "boolean" + }, + "canTrashChildren": { + "description": "Whether the current user can trash children from folders in this Team Drive.", + "type": "boolean" + } + }, + "type": "object" + }, + "colorRgb": { + "description": "The color of this Team Drive as an RGB hex string. 
It can only be set on a drive.teamdrives.update request that does not set themeId.", + "type": "string" + }, + "createdTime": { + "description": "The time at which the Team Drive was created (RFC 3339 date-time).", + "format": "date-time", + "type": "string" + }, + "id": { + "description": "The ID of this Team Drive which is also the ID of the top level folder of this Team Drive.", + "type": "string" + }, + "kind": { + "default": "drive#teamDrive", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#teamDrive\".", + "type": "string" + }, + "name": { + "description": "The name of this Team Drive.", + "type": "string" + }, + "restrictions": { + "description": "A set of restrictions that apply to this Team Drive or items inside this Team Drive.", + "properties": { + "adminManagedRestrictions": { + "description": "Whether administrative privileges on this Team Drive are required to modify restrictions.", + "type": "boolean" + }, + "copyRequiresWriterPermission": { + "description": "Whether the options to copy, print, or download files inside this Team Drive should be disabled for readers and commenters. When this restriction is set to true, it will override the similarly named field to true for any file inside this Team Drive.", + "type": "boolean" + }, + "domainUsersOnly": { + "description": "Whether access to this Team Drive and items inside this Team Drive is restricted to users of the domain to which this Team Drive belongs. This restriction may be overridden by other sharing policies controlled outside of this Team Drive.", + "type": "boolean" + }, + "teamMembersOnly": { + "description": "Whether access to items inside this Team Drive is restricted to members of this Team Drive.", + "type": "boolean" + } + }, + "type": "object" + }, + "themeId": { + "description": "The ID of the theme from which the background image and color will be set. The set of possible teamDriveThemes can be retrieved from a drive.about.get response. When not specified on a drive.teamdrives.create request, a random theme is chosen from which the background image and color are set. 
This is a write-only field; it can only be set on requests that don't set colorRgb or backgroundImageFile.", + "type": "string" + } }, - "revisionId": { - "type": "string", - "description": "The ID of the revision.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId", - "revisionId" - ], - "response": { - "$ref": "Revision" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true + "type": "object" }, - "list": { - "id": "drive.revisions.list", - "path": "files/{fileId}/revisions", - "httpMethod": "GET", - "description": "Lists a file's revisions.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId" - ], - "response": { - "$ref": "RevisionList" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file", - "https://www.googleapis.com/auth/drive.metadata", - "https://www.googleapis.com/auth/drive.metadata.readonly", - "https://www.googleapis.com/auth/drive.photos.readonly", - "https://www.googleapis.com/auth/drive.readonly" - ] + "TeamDriveList": { + "description": "A list of Team Drives.", + "id": "TeamDriveList", + "properties": { + "kind": { + "default": "drive#teamDriveList", + "description": "Identifies what kind of resource this is. Value: the fixed string \"drive#teamDriveList\".", + "type": "string" + }, + "nextPageToken": { + "description": "The page token for the next page of Team Drives. This will be absent if the end of the Team Drives list has been reached. If the token is rejected for any reason, it should be discarded, and pagination should be restarted from the first page of results.", + "type": "string" + }, + "teamDrives": { + "description": "The list of Team Drives. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched.", + "items": { + "$ref": "TeamDrive" + }, + "type": "array" + } + }, + "type": "object" }, - "update": { - "id": "drive.revisions.update", - "path": "files/{fileId}/revisions/{revisionId}", - "httpMethod": "PATCH", - "description": "Updates a revision with patch semantics.", - "parameters": { - "fileId": { - "type": "string", - "description": "The ID of the file.", - "required": true, - "location": "path" + "User": { + "description": "Information about a Drive user.", + "id": "User", + "properties": { + "displayName": { + "description": "A plain text displayable name for this user.", + "type": "string" + }, + "emailAddress": { + "description": "The email address of the user. This may not be present in certain contexts if the user has not made their email address visible to the requester.", + "type": "string" + }, + "kind": { + "default": "drive#user", + "description": "Identifies what kind of resource this is. 
Value: the fixed string \"drive#user\".", + "type": "string" + }, + "me": { + "description": "Whether this user is the requesting user.", + "type": "boolean" + }, + "permissionId": { + "description": "The user's ID as visible in Permission resources.", + "type": "string" + }, + "photoLink": { + "description": "A link to the user's profile photo, if available.", + "type": "string" + } }, - "revisionId": { - "type": "string", - "description": "The ID of the revision.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "fileId", - "revisionId" - ], - "request": { - "$ref": "Revision" - }, - "response": { - "$ref": "Revision" - }, - "scopes": [ - "https://www.googleapis.com/auth/drive", - "https://www.googleapis.com/auth/drive.appdata", - "https://www.googleapis.com/auth/drive.file" - ] + "type": "object" } - } - } - } -} + }, + "servicePath": "drive/v3/", + "title": "Drive API", + "version": "v3" +} \ No newline at end of file diff --git a/vendor/google.golang.org/api/drive/v3/drive-gen.go b/vendor/google.golang.org/api/drive/v3/drive-gen.go index 9e6e0874..beb1a444 100644 --- a/vendor/google.golang.org/api/drive/v3/drive-gen.go +++ b/vendor/google.golang.org/api/drive/v3/drive-gen.go @@ -1,3 +1,9 @@ +// Copyright 2019 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated file. DO NOT EDIT. + // Package drive provides access to the Drive API. // // See https://developers.google.com/drive/ @@ -11,18 +17,18 @@ package drive import ( "bytes" + "context" "encoding/json" "errors" "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" "io" "net/http" "net/url" "strconv" "strings" + + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" ) // Always reference these packages, just in case the auto-generated code @@ -38,7 +44,6 @@ var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled -var _ = ctxhttp.Do const apiId = "drive:v3" const apiName = "drive" @@ -47,7 +52,7 @@ const basePath = "https://www.googleapis.com/drive/v3/" // OAuth2 scopes used by this API. 
const ( - // View and manage the files in your Google Drive + // See, edit, create, and delete all of your Google Drive files DriveScope = "https://www.googleapis.com/auth/drive" // View and manage its own configuration data in your Google Drive @@ -66,7 +71,7 @@ const ( // View the photos, videos and albums in your Google Photos DrivePhotosReadonlyScope = "https://www.googleapis.com/auth/drive.photos.readonly" - // View the files in your Google Drive + // See and download all your Google Drive files DriveReadonlyScope = "https://www.googleapis.com/auth/drive.readonly" // Modify your Google Apps Script scripts' behavior @@ -86,6 +91,7 @@ func New(client *http.Client) (*Service, error) { s.Permissions = NewPermissionsService(s) s.Replies = NewRepliesService(s) s.Revisions = NewRevisionsService(s) + s.Teamdrives = NewTeamdrivesService(s) return s, nil } @@ -109,6 +115,8 @@ type Service struct { Replies *RepliesService Revisions *RevisionsService + + Teamdrives *TeamdrivesService } func (s *Service) userAgent() string { @@ -190,12 +198,24 @@ type RevisionsService struct { s *Service } +func NewTeamdrivesService(s *Service) *TeamdrivesService { + rs := &TeamdrivesService{s: s} + return rs +} + +type TeamdrivesService struct { + s *Service +} + // About: Information about the user, the user's Drive, and system // capabilities. type About struct { // AppInstalled: Whether the user has installed the requesting app. AppInstalled bool `json:"appInstalled,omitempty"` + // CanCreateTeamDrives: Whether the user can create Team Drives. + CanCreateTeamDrives bool `json:"canCreateTeamDrives,omitempty"` + // ExportFormats: A map of source MIME type to possible targets for all // supported exports. ExportFormats map[string][]string `json:"exportFormats,omitempty"` @@ -208,7 +228,8 @@ type About struct { // supported imports. ImportFormats map[string][]string `json:"importFormats,omitempty"` - // Kind: This is always drive#about. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#about". Kind string `json:"kind,omitempty"` // MaxImportSizes: A map of maximum import sizes by MIME type, in bytes. @@ -221,6 +242,9 @@ type About struct { // are measured in bytes. StorageQuota *AboutStorageQuota `json:"storageQuota,omitempty"` + // TeamDriveThemes: A list of themes that are supported for Team Drives. + TeamDriveThemes []*AboutTeamDriveThemes `json:"teamDriveThemes,omitempty"` + // User: The authenticated user. User *User `json:"user,omitempty"` @@ -235,12 +259,20 @@ type About struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AppInstalled") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *About) MarshalJSON() ([]byte, error) { - type noMethod About - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod About + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AboutStorageQuota: The user's storage quota limits and usage. 
All @@ -266,33 +298,88 @@ type AboutStorageQuota struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Limit") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *AboutStorageQuota) MarshalJSON() ([]byte, error) { - type noMethod AboutStorageQuota - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod AboutStorageQuota + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AboutTeamDriveThemes struct { + // BackgroundImageLink: A link to this Team Drive theme's background + // image. + BackgroundImageLink string `json:"backgroundImageLink,omitempty"` + + // ColorRgb: The color of this Team Drive theme as an RGB hex string. + ColorRgb string `json:"colorRgb,omitempty"` + + // Id: The ID of the theme. + Id string `json:"id,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackgroundImageLink") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackgroundImageLink") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AboutTeamDriveThemes) MarshalJSON() ([]byte, error) { + type NoMethod AboutTeamDriveThemes + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Change: A change to a file. +// Change: A change to a file or Team Drive. type Change struct { - // File: The updated state of the file. Present if the file has not been - // removed. + // File: The updated state of the file. Present if the type is file and + // the file has not been removed from this list of changes. File *File `json:"file,omitempty"` // FileId: The ID of the file which has changed. FileId string `json:"fileId,omitempty"` - // Kind: This is always drive#change. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#change". Kind string `json:"kind,omitempty"` - // Removed: Whether the file has been removed from the view of the - // changes list, for example by deletion or lost access. + // Removed: Whether the file or Team Drive has been removed from this + // list of changes, for example by deletion or loss of access. Removed bool `json:"removed,omitempty"` + // TeamDrive: The updated state of the Team Drive. 
Present if the type + // is teamDrive, the user is still a member of the Team Drive, and the + // Team Drive has not been removed. + TeamDrive *TeamDrive `json:"teamDrive,omitempty"` + + // TeamDriveId: The ID of the Team Drive associated with this change. + TeamDriveId string `json:"teamDriveId,omitempty"` + // Time: The time of this change (RFC 3339 date-time). Time string `json:"time,omitempty"` + // Type: The type of the change. Possible values are file and teamDrive. + Type string `json:"type,omitempty"` + // ForceSendFields is a list of field names (e.g. "File") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -300,20 +387,31 @@ type Change struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "File") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Change) MarshalJSON() ([]byte, error) { - type noMethod Change - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod Change + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ChangeList: A list of changes for a user. type ChangeList struct { - // Changes: The page of changes. + // Changes: The list of changes. If nextPageToken is populated, then + // this list may be incomplete and an additional page of results should + // be fetched. Changes []*Change `json:"changes,omitempty"` - // Kind: This is always drive#changeList. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#changeList". Kind string `json:"kind,omitempty"` // NewStartPageToken: The starting page token for future changes. This @@ -322,7 +420,9 @@ type ChangeList struct { NewStartPageToken string `json:"newStartPageToken,omitempty"` // NextPageToken: The page token for the next page of changes. This will - // be absent if the end of the current changes list has been reached. + // be absent if the end of the changes list has been reached. If the + // token is rejected for any reason, it should be discarded, and + // pagination should be restarted from the first page of results. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -336,12 +436,20 @@ type ChangeList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Changes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *ChangeList) MarshalJSON() ([]byte, error) { - type noMethod ChangeList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod ChangeList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Channel: A notification channel used to watch for resource changes. @@ -394,12 +502,20 @@ type Channel struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Channel) MarshalJSON() ([]byte, error) { - type noMethod Channel - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod Channel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Comment: A comment on a file. @@ -430,7 +546,8 @@ type Comment struct { // Id: The ID of the comment. Id string `json:"id,omitempty"` - // Kind: This is always drive#comment. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#comment". Kind string `json:"kind,omitempty"` // ModifiedTime: The last time the comment or any of its replies was @@ -461,12 +578,20 @@ type Comment struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Anchor") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Comment) MarshalJSON() ([]byte, error) { - type noMethod Comment - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod Comment + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CommentQuotedFileContent: The file content to which the comment @@ -487,24 +612,37 @@ type CommentQuotedFileContent struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MimeType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *CommentQuotedFileContent) MarshalJSON() ([]byte, error) { - type noMethod CommentQuotedFileContent - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod CommentQuotedFileContent + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CommentList: A list of comments on a file. type CommentList struct { - // Comments: The page of comments. + // Comments: The list of comments. If nextPageToken is populated, then + // this list may be incomplete and an additional page of results should + // be fetched. Comments []*Comment `json:"comments,omitempty"` - // Kind: This is always drive#commentList. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#commentList". Kind string `json:"kind,omitempty"` // NextPageToken: The page token for the next page of comments. This - // will be absent if the end of the comments list has been reached. + // will be absent if the end of the comments list has been reached. If + // the token is rejected for any reason, it should be discarded, and + // pagination should be restarted from the first page of results. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -518,12 +656,20 @@ type CommentList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Comments") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *CommentList) MarshalJSON() ([]byte, error) { - type noMethod CommentList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod CommentList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // File: The metadata for a file. @@ -533,13 +679,18 @@ type File struct { // Entries with null values are cleared in update and copy requests. AppProperties map[string]string `json:"appProperties,omitempty"` - // Capabilities: Capabilities the current user has on the file. + // Capabilities: Capabilities the current user has on this file. Each + // capability corresponds to a fine-grained action that a user may take. Capabilities *FileCapabilities `json:"capabilities,omitempty"` // ContentHints: Additional information about the content of the file. // These fields are never populated in responses. ContentHints *FileContentHints `json:"contentHints,omitempty"` + // CopyRequiresWriterPermission: Whether the options to copy, print, or + // download this file, should be disabled for readers and commenters. + CopyRequiresWriterPermission bool `json:"copyRequiresWriterPermission,omitempty"` + // CreatedTime: The time at which the file was created (RFC 3339 // date-time). CreatedTime string `json:"createdTime,omitempty"` @@ -551,6 +702,9 @@ type File struct { // opposed to recursively trashed from a parent folder. ExplicitlyTrashed bool `json:"explicitlyTrashed,omitempty"` + // ExportLinks: Links for exporting Google Docs to specific formats. 
+ ExportLinks map[string]string `json:"exportLinks,omitempty"` + // FileExtension: The final component of fullFileExtension. This is only // available for files with binary content in Drive. FileExtension string `json:"fileExtension,omitempty"` @@ -570,6 +724,16 @@ type File struct { // is not cleared if the new name does not contain a valid extension. FullFileExtension string `json:"fullFileExtension,omitempty"` + // HasAugmentedPermissions: Whether any users are granted file access + // directly on this file. This field is only populated for Team Drive + // files. + HasAugmentedPermissions bool `json:"hasAugmentedPermissions,omitempty"` + + // HasThumbnail: Whether this file has a thumbnail. This does not + // indicate whether the requesting app has access to the thumbnail. To + // check access, look for the presence of the thumbnailLink field. + HasThumbnail bool `json:"hasThumbnail,omitempty"` + // HeadRevisionId: The ID of the file's head revision. This is currently // only available for files with binary content in Drive. HeadRevisionId string `json:"headRevisionId,omitempty"` @@ -584,7 +748,12 @@ type File struct { // available. ImageMediaMetadata *FileImageMediaMetadata `json:"imageMediaMetadata,omitempty"` - // Kind: This is always drive#file. + // IsAppAuthorized: Whether the file was created or opened by the + // requesting app. + IsAppAuthorized bool `json:"isAppAuthorized,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#file". Kind string `json:"kind,omitempty"` // LastModifyingUser: The last user to modify the file. @@ -603,6 +772,9 @@ type File struct { // are published in the About resource. MimeType string `json:"mimeType,omitempty"` + // ModifiedByMe: Whether the file has been modified by this user. + ModifiedByMe bool `json:"modifiedByMe,omitempty"` + // ModifiedByMeTime: The last time the file was modified by the user // (RFC 3339 date-time). ModifiedByMeTime string `json:"modifiedByMeTime,omitempty"` @@ -614,7 +786,9 @@ type File struct { ModifiedTime string `json:"modifiedTime,omitempty"` // Name: The name of the file. This is not necessarily unique within a - // folder. + // folder. Note that for immutable items such as the top level folders + // of Team Drives, My Drive root folder, and Application Data folder the + // name is constant. Name string `json:"name,omitempty"` // OriginalFilename: The original filename of the uploaded content if @@ -622,21 +796,29 @@ type File struct { // available for files with binary content in Drive. OriginalFilename string `json:"originalFilename,omitempty"` - // OwnedByMe: Whether the user owns the file. + // OwnedByMe: Whether the user owns the file. Not populated for Team + // Drive files. OwnedByMe bool `json:"ownedByMe,omitempty"` // Owners: The owners of the file. Currently, only certain legacy files - // may have more than one owner. + // may have more than one owner. Not populated for Team Drive files. Owners []*User `json:"owners,omitempty"` // Parents: The IDs of the parent folders which contain the file. // If not specified as part of a create request, the file will be placed - // directly in the My Drive folder. Update requests must use the - // addParents and removeParents parameters to modify the values. + // directly in the user's My Drive folder. If not specified as part of a + // copy request, the file will inherit any discoverable parents of the + // source file. 
Update requests must use the addParents and + // removeParents parameters to modify the parents list. Parents []string `json:"parents,omitempty"` + // PermissionIds: List of permission IDs for users with access to this + // file. + PermissionIds []string `json:"permissionIds,omitempty"` + // Permissions: The full list of permissions for the file. This is only - // available if the requesting user can share the file. + // available if the requesting user can share the file. Not populated + // for Team Drive files. Permissions []*Permission `json:"permissions,omitempty"` // Properties: A collection of arbitrary key-value pairs which are @@ -649,7 +831,8 @@ type File struct { // keepForever enabled. QuotaBytesUsed int64 `json:"quotaBytesUsed,omitempty,string"` - // Shared: Whether the file has been shared. + // Shared: Whether the file has been shared. Not populated for Team + // Drive files. Shared bool `json:"shared,omitempty"` // SharedWithMeTime: The time at which the file was shared with the @@ -671,15 +854,31 @@ type File struct { // Starred: Whether the user has starred the file. Starred bool `json:"starred,omitempty"` + // TeamDriveId: ID of the Team Drive the file resides in. + TeamDriveId string `json:"teamDriveId,omitempty"` + // ThumbnailLink: A short-lived link to the file's thumbnail, if - // available. Typically lasts on the order of hours. + // available. Typically lasts on the order of hours. Only populated when + // the requesting app can access the file's content. ThumbnailLink string `json:"thumbnailLink,omitempty"` + // ThumbnailVersion: The thumbnail version for use in thumbnail cache + // invalidation. + ThumbnailVersion int64 `json:"thumbnailVersion,omitempty,string"` + // Trashed: Whether the file has been trashed, either explicitly or from // a trashed parent folder. Only the owner may trash a file, and other // users cannot see files in the owner's trash. Trashed bool `json:"trashed,omitempty"` + // TrashedTime: The time that the item was trashed (RFC 3339 date-time). + // Only populated for Team Drive files. + TrashedTime string `json:"trashedTime,omitempty"` + + // TrashingUser: If the file has been explicitly trashed, the user who + // trashed it. Only populated for Team Drive files. + TrashingUser *User `json:"trashingUser,omitempty"` + // Version: A monotonically increasing version number for the file. This // reflects every change made to the file on the server, even those not // visible to the user. @@ -696,9 +895,8 @@ type File struct { // 3339 date-time). ViewedByMeTime string `json:"viewedByMeTime,omitempty"` - // ViewersCanCopyContent: Whether users with only reader or commenter - // permission can copy the file's content. This affects copy, download, - // and print operations. + // ViewersCanCopyContent: Deprecated - use copyRequiresWriterPermission + // instead. ViewersCanCopyContent bool `json:"viewersCanCopyContent,omitempty"` // WebContentLink: A link for downloading the content of the file in a @@ -711,7 +909,7 @@ type File struct { WebViewLink string `json:"webViewLink,omitempty"` // WritersCanShare: Whether users with only writer permission can modify - // the file's permissions. + // the file's permissions. Not populated for Team Drive files. WritersCanShare bool `json:"writersCanShare,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -725,46 +923,151 @@ type File struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AppProperties") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *File) MarshalJSON() ([]byte, error) { - type noMethod File - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod File + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// FileCapabilities: Capabilities the current user has on the file. +// FileCapabilities: Capabilities the current user has on this file. +// Each capability corresponds to a fine-grained action that a user may +// take. type FileCapabilities struct { - // CanComment: Whether the user can comment on the file. + // CanAddChildren: Whether the current user can add children to this + // folder. This is always false when the item is not a folder. + CanAddChildren bool `json:"canAddChildren,omitempty"` + + // CanChangeCopyRequiresWriterPermission: Whether the current user can + // change the copyRequiresWriterPermission restriction of this file. + CanChangeCopyRequiresWriterPermission bool `json:"canChangeCopyRequiresWriterPermission,omitempty"` + + // CanChangeViewersCanCopyContent: Deprecated + CanChangeViewersCanCopyContent bool `json:"canChangeViewersCanCopyContent,omitempty"` + + // CanComment: Whether the current user can comment on this file. CanComment bool `json:"canComment,omitempty"` - // CanCopy: Whether the user can copy the file. + // CanCopy: Whether the current user can copy this file. For a Team + // Drive item, whether the current user can copy non-folder descendants + // of this item, or this item itself if it is not a folder. CanCopy bool `json:"canCopy,omitempty"` - // CanEdit: Whether the user can edit the file's content. + // CanDelete: Whether the current user can delete this file. + CanDelete bool `json:"canDelete,omitempty"` + + // CanDeleteChildren: Whether the current user can delete children of + // this folder. This is false when the item is not a folder. Only + // populated for Team Drive items. + CanDeleteChildren bool `json:"canDeleteChildren,omitempty"` + + // CanDownload: Whether the current user can download this file. + CanDownload bool `json:"canDownload,omitempty"` + + // CanEdit: Whether the current user can edit this file. CanEdit bool `json:"canEdit,omitempty"` - // CanReadRevisions: Whether the current user has read access to the - // Revisions resource of the file. + // CanListChildren: Whether the current user can list the children of + // this folder. This is always false when the item is not a folder. + CanListChildren bool `json:"canListChildren,omitempty"` + + // CanMoveChildrenOutOfTeamDrive: Whether the current user can move + // children of this folder outside of the Team Drive. This is false when + // the item is not a folder. Only populated for Team Drive items. + CanMoveChildrenOutOfTeamDrive bool `json:"canMoveChildrenOutOfTeamDrive,omitempty"` + + // CanMoveChildrenWithinTeamDrive: Whether the current user can move + // children of this folder within the Team Drive. This is false when the + // item is not a folder. Only populated for Team Drive items. 
+ CanMoveChildrenWithinTeamDrive bool `json:"canMoveChildrenWithinTeamDrive,omitempty"` + + // CanMoveItemIntoTeamDrive: Whether the current user can move this item + // into a Team Drive. If the item is in a Team Drive, this field is + // equivalent to canMoveTeamDriveItem. + CanMoveItemIntoTeamDrive bool `json:"canMoveItemIntoTeamDrive,omitempty"` + + // CanMoveItemOutOfTeamDrive: Whether the current user can move this + // Team Drive item outside of this Team Drive by changing its parent. + // Note that a request to change the parent of the item may still fail + // depending on the new parent that is being added. Only populated for + // Team Drive items. + CanMoveItemOutOfTeamDrive bool `json:"canMoveItemOutOfTeamDrive,omitempty"` + + // CanMoveItemWithinTeamDrive: Whether the current user can move this + // Team Drive item within this Team Drive. Note that a request to change + // the parent of the item may still fail depending on the new parent + // that is being added. Only populated for Team Drive items. + CanMoveItemWithinTeamDrive bool `json:"canMoveItemWithinTeamDrive,omitempty"` + + // CanMoveTeamDriveItem: Deprecated - use canMoveItemWithinTeamDrive or + // canMoveItemOutOfTeamDrive instead. + CanMoveTeamDriveItem bool `json:"canMoveTeamDriveItem,omitempty"` + + // CanReadRevisions: Whether the current user can read the revisions + // resource of this file. For a Team Drive item, whether revisions of + // non-folder descendants of this item, or this item itself if it is not + // a folder, can be read. CanReadRevisions bool `json:"canReadRevisions,omitempty"` - // CanShare: Whether the user can modify the file's permissions and - // sharing settings. + // CanReadTeamDrive: Whether the current user can read the Team Drive to + // which this file belongs. Only populated for Team Drive files. + CanReadTeamDrive bool `json:"canReadTeamDrive,omitempty"` + + // CanRemoveChildren: Whether the current user can remove children from + // this folder. This is always false when the item is not a folder. For + // Team Drive items, use canDeleteChildren or canTrashChildren instead. + CanRemoveChildren bool `json:"canRemoveChildren,omitempty"` + + // CanRename: Whether the current user can rename this file. + CanRename bool `json:"canRename,omitempty"` + + // CanShare: Whether the current user can modify the sharing settings + // for this file. CanShare bool `json:"canShare,omitempty"` - // ForceSendFields is a list of field names (e.g. "CanComment") to + // CanTrash: Whether the current user can move this file to trash. + CanTrash bool `json:"canTrash,omitempty"` + + // CanTrashChildren: Whether the current user can trash children of this + // folder. This is false when the item is not a folder. Only populated + // for Team Drive items. + CanTrashChildren bool `json:"canTrashChildren,omitempty"` + + // CanUntrash: Whether the current user can restore this file from + // trash. + CanUntrash bool `json:"canUntrash,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CanAddChildren") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CanAddChildren") to + // include in API requests with the JSON null value. 
By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` } func (s *FileCapabilities) MarshalJSON() ([]byte, error) { - type noMethod FileCapabilities - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod FileCapabilities + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // FileContentHints: Additional information about the content of the @@ -786,12 +1089,20 @@ type FileContentHints struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IndexableText") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *FileContentHints) MarshalJSON() ([]byte, error) { - type noMethod FileContentHints - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod FileContentHints + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // FileContentHintsThumbnail: A thumbnail for the file. This will only @@ -811,12 +1122,20 @@ type FileContentHintsThumbnail struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Image") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *FileContentHintsThumbnail) MarshalJSON() ([]byte, error) { - type noMethod FileContentHintsThumbnail - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod FileContentHintsThumbnail + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // FileImageMediaMetadata: Additional metadata about image media, if @@ -895,12 +1214,42 @@ type FileImageMediaMetadata struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Aperture") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` } func (s *FileImageMediaMetadata) MarshalJSON() ([]byte, error) { - type noMethod FileImageMediaMetadata - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod FileImageMediaMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *FileImageMediaMetadata) UnmarshalJSON(data []byte) error { + type NoMethod FileImageMediaMetadata + var s1 struct { + Aperture gensupport.JSONFloat64 `json:"aperture"` + ExposureBias gensupport.JSONFloat64 `json:"exposureBias"` + ExposureTime gensupport.JSONFloat64 `json:"exposureTime"` + FocalLength gensupport.JSONFloat64 `json:"focalLength"` + MaxApertureValue gensupport.JSONFloat64 `json:"maxApertureValue"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Aperture = float64(s1.Aperture) + s.ExposureBias = float64(s1.ExposureBias) + s.ExposureTime = float64(s1.ExposureTime) + s.FocalLength = float64(s1.FocalLength) + s.MaxApertureValue = float64(s1.MaxApertureValue) + return nil } // FileImageMediaMetadataLocation: Geographic location information @@ -922,12 +1271,38 @@ type FileImageMediaMetadataLocation struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Altitude") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *FileImageMediaMetadataLocation) MarshalJSON() ([]byte, error) { - type noMethod FileImageMediaMetadataLocation - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod FileImageMediaMetadataLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *FileImageMediaMetadataLocation) UnmarshalJSON(data []byte) error { + type NoMethod FileImageMediaMetadataLocation + var s1 struct { + Altitude gensupport.JSONFloat64 `json:"altitude"` + Latitude gensupport.JSONFloat64 `json:"latitude"` + Longitude gensupport.JSONFloat64 `json:"longitude"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Altitude = float64(s1.Altitude) + s.Latitude = float64(s1.Latitude) + s.Longitude = float64(s1.Longitude) + return nil } // FileVideoMediaMetadata: Additional metadata about video media. This @@ -949,24 +1324,46 @@ type FileVideoMediaMetadata struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DurationMillis") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` } func (s *FileVideoMediaMetadata) MarshalJSON() ([]byte, error) { - type noMethod FileVideoMediaMetadata - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod FileVideoMediaMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // FileList: A list of files. type FileList struct { - // Files: The page of files. + // Files: The list of files. If nextPageToken is populated, then this + // list may be incomplete and an additional page of results should be + // fetched. Files []*File `json:"files,omitempty"` - // Kind: This is always drive#fileList. + // IncompleteSearch: Whether the search process was incomplete. If true, + // then some search results may be missing, since all documents were not + // searched. This may occur when searching multiple Team Drives with the + // "user,allTeamDrives" corpora, but all corpora could not be searched. + // When this happens, it is suggested that clients narrow their query by + // choosing a different corpus such as "user" or "teamDrive". + IncompleteSearch bool `json:"incompleteSearch,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#fileList". Kind string `json:"kind,omitempty"` // NextPageToken: The page token for the next page of files. This will - // be absent if the end of the files list has been reached. + // be absent if the end of the files list has been reached. If the token + // is rejected for any reason, it should be discarded, and pagination + // should be restarted from the first page of results. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -980,12 +1377,20 @@ type FileList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Files") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *FileList) MarshalJSON() ([]byte, error) { - type noMethod FileList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod FileList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GeneratedIds: A list of generated file IDs which can be provided in @@ -995,7 +1400,8 @@ type GeneratedIds struct { // space. Ids []string `json:"ids,omitempty"` - // Kind: This is always drive#generatedIds + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#generatedIds". Kind string `json:"kind,omitempty"` // Space: The type of file that can be created with these IDs. @@ -1012,12 +1418,20 @@ type GeneratedIds struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Ids") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *GeneratedIds) MarshalJSON() ([]byte, error) { - type noMethod GeneratedIds - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod GeneratedIds + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Permission: A permission for a file. A permission grants a user, @@ -1028,6 +1442,10 @@ type Permission struct { // type domain or anyone. AllowFileDiscovery bool `json:"allowFileDiscovery,omitempty"` + // Deleted: Whether the account associated with this permission has been + // deleted. This field only pertains to user and group permissions. + Deleted bool `json:"deleted,omitempty"` + // DisplayName: A displayable name for users, groups or domains. DisplayName string `json:"displayName,omitempty"` @@ -1038,23 +1456,40 @@ type Permission struct { // permission refers. EmailAddress string `json:"emailAddress,omitempty"` + // ExpirationTime: The time at which this permission will expire (RFC + // 3339 date-time). Expiration times have the following restrictions: + // + // - They can only be set on user and group permissions + // - The time must be in the future + // - The time cannot be more than a year in the future + ExpirationTime string `json:"expirationTime,omitempty"` + // Id: The ID of this permission. This is a unique identifier for the // grantee, and is published in User resources as permissionId. Id string `json:"id,omitempty"` - // Kind: This is always drive#permission. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#permission". Kind string `json:"kind,omitempty"` // PhotoLink: A link to the user's profile photo, if available. PhotoLink string `json:"photoLink,omitempty"` - // Role: The role granted by this permission. Valid values are: + // Role: The role granted by this permission. While new values may be + // supported in the future, the following are currently allowed: // - owner + // - organizer + // - fileOrganizer // - writer // - commenter // - reader Role string `json:"role,omitempty"` + // TeamDrivePermissionDetails: Details of whether the permissions on + // this Team Drive item are inherited or directly on this item. This is + // an output-only field which is present only for Team Drive items. + TeamDrivePermissionDetails []*PermissionTeamDrivePermissionDetails `json:"teamDrivePermissionDetails,omitempty"` + // Type: The type of the grantee. Valid values are: // - user // - group @@ -1073,20 +1508,88 @@ type Permission struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowFileDiscovery") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` } func (s *Permission) MarshalJSON() ([]byte, error) { - type noMethod Permission - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod Permission + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PermissionTeamDrivePermissionDetails struct { + // Inherited: Whether this permission is inherited. This field is always + // populated. This is an output-only field. + Inherited bool `json:"inherited,omitempty"` + + // InheritedFrom: The ID of the item from which this permission is + // inherited. This is an output-only field and is only populated for + // members of the Team Drive. + InheritedFrom string `json:"inheritedFrom,omitempty"` + + // Role: The primary role for this user. While new values may be added + // in the future, the following are currently possible: + // - organizer + // - fileOrganizer + // - writer + // - commenter + // - reader + Role string `json:"role,omitempty"` + + // TeamDrivePermissionType: The Team Drive permission type for this + // user. While new values may be added in future, the following are + // currently possible: + // - file + // - member + TeamDrivePermissionType string `json:"teamDrivePermissionType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Inherited") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Inherited") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PermissionTeamDrivePermissionDetails) MarshalJSON() ([]byte, error) { + type NoMethod PermissionTeamDrivePermissionDetails + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // PermissionList: A list of permissions for a file. type PermissionList struct { - // Kind: This is always drive#permissionList. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#permissionList". Kind string `json:"kind,omitempty"` - // Permissions: The full list of permissions. + // NextPageToken: The page token for the next page of permissions. This + // field will be absent if the end of the permissions list has been + // reached. If the token is rejected for any reason, it should be + // discarded, and pagination should be restarted from the first page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Permissions: The list of permissions. If nextPageToken is populated, + // then this list may be incomplete and an additional page of results + // should be fetched. Permissions []*Permission `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1100,12 +1603,20 @@ type PermissionList struct { // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *PermissionList) MarshalJSON() ([]byte, error) { - type noMethod PermissionList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod PermissionList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Reply: A reply to a comment on a file. @@ -1138,7 +1649,8 @@ type Reply struct { // Id: The ID of the reply. Id string `json:"id,omitempty"` - // Kind: This is always drive#reply. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#reply". Kind string `json:"kind,omitempty"` // ModifiedTime: The last time the reply was modified (RFC 3339 @@ -1156,24 +1668,37 @@ type Reply struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Reply) MarshalJSON() ([]byte, error) { - type noMethod Reply - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod Reply + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ReplyList: A list of replies to a comment on a file. type ReplyList struct { - // Kind: This is always drive#replyList. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#replyList". Kind string `json:"kind,omitempty"` // NextPageToken: The page token for the next page of replies. This will - // be absent if the end of the replies list has been reached. + // be absent if the end of the replies list has been reached. If the + // token is rejected for any reason, it should be discarded, and + // pagination should be restarted from the first page of results. NextPageToken string `json:"nextPageToken,omitempty"` - // Replies: The page of replies. + // Replies: The list of replies. If nextPageToken is populated, then + // this list may be incomplete and an additional page of results should + // be fetched. Replies []*Reply `json:"replies,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1187,16 +1712,27 @@ type ReplyList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *ReplyList) MarshalJSON() ([]byte, error) { - type noMethod ReplyList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod ReplyList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Revision: The metadata for a revision to a file. type Revision struct { + // ExportLinks: Links for exporting Google Docs to specific formats. + ExportLinks map[string]string `json:"exportLinks,omitempty"` + // Id: The ID of the revision. Id string `json:"id,omitempty"` @@ -1207,7 +1743,8 @@ type Revision struct { // This field is only applicable to files with binary content in Drive. KeepForever bool `json:"keepForever,omitempty"` - // Kind: This is always drive#revision. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#revision". Kind string `json:"kind,omitempty"` // LastModifyingUser: The last user to modify this revision. @@ -1248,27 +1785,44 @@ type Revision struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "ExportLinks") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExportLinks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *Revision) MarshalJSON() ([]byte, error) { - type noMethod Revision - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod Revision + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // RevisionList: A list of revisions of a file. type RevisionList struct { - // Kind: This is always drive#revisionList. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#revisionList". Kind string `json:"kind,omitempty"` - // Revisions: The full list of revisions. + // NextPageToken: The page token for the next page of revisions. This + // will be absent if the end of the revisions list has been reached. If + // the token is rejected for any reason, it should be discarded, and + // pagination should be restarted from the first page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Revisions: The list of revisions. If nextPageToken is populated, then + // this list may be incomplete and an additional page of results should + // be fetched. 
Revisions []*Revision `json:"revisions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1282,16 +1836,25 @@ type RevisionList struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *RevisionList) MarshalJSON() ([]byte, error) { - type noMethod RevisionList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod RevisionList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type StartPageToken struct { - // Kind: This is always drive#startPageToken. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#startPageToken". Kind string `json:"kind,omitempty"` // StartPageToken: The starting page token for listing changes. @@ -1308,12 +1871,364 @@ type StartPageToken struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *StartPageToken) MarshalJSON() ([]byte, error) { - type noMethod StartPageToken - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod StartPageToken + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TeamDrive: Representation of a Team Drive. +type TeamDrive struct { + // BackgroundImageFile: An image file and cropping parameters from which + // a background image for this Team Drive is set. This is a write only + // field; it can only be set on drive.teamdrives.update requests that + // don't set themeId. When specified, all fields of the + // backgroundImageFile must be set. + BackgroundImageFile *TeamDriveBackgroundImageFile `json:"backgroundImageFile,omitempty"` + + // BackgroundImageLink: A short-lived link to this Team Drive's + // background image. + BackgroundImageLink string `json:"backgroundImageLink,omitempty"` + + // Capabilities: Capabilities the current user has on this Team Drive. + Capabilities *TeamDriveCapabilities `json:"capabilities,omitempty"` + + // ColorRgb: The color of this Team Drive as an RGB hex string. It can + // only be set on a drive.teamdrives.update request that does not set + // themeId. + ColorRgb string `json:"colorRgb,omitempty"` + + // CreatedTime: The time at which the Team Drive was created (RFC 3339 + // date-time). 
+ CreatedTime string `json:"createdTime,omitempty"` + + // Id: The ID of this Team Drive which is also the ID of the top level + // folder of this Team Drive. + Id string `json:"id,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#teamDrive". + Kind string `json:"kind,omitempty"` + + // Name: The name of this Team Drive. + Name string `json:"name,omitempty"` + + // Restrictions: A set of restrictions that apply to this Team Drive or + // items inside this Team Drive. + Restrictions *TeamDriveRestrictions `json:"restrictions,omitempty"` + + // ThemeId: The ID of the theme from which the background image and + // color will be set. The set of possible teamDriveThemes can be + // retrieved from a drive.about.get response. When not specified on a + // drive.teamdrives.create request, a random theme is chosen from which + // the background image and color are set. This is a write-only field; + // it can only be set on requests that don't set colorRgb or + // backgroundImageFile. + ThemeId string `json:"themeId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "BackgroundImageFile") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackgroundImageFile") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TeamDrive) MarshalJSON() ([]byte, error) { + type NoMethod TeamDrive + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TeamDriveBackgroundImageFile: An image file and cropping parameters +// from which a background image for this Team Drive is set. This is a +// write only field; it can only be set on drive.teamdrives.update +// requests that don't set themeId. When specified, all fields of the +// backgroundImageFile must be set. +type TeamDriveBackgroundImageFile struct { + // Id: The ID of an image file in Drive to use for the background image. + Id string `json:"id,omitempty"` + + // Width: The width of the cropped image in the closed range of 0 to 1. + // This value represents the width of the cropped image divided by the + // width of the entire image. The height is computed by applying a width + // to height aspect ratio of 80 to 9. The resulting image must be at + // least 1280 pixels wide and 144 pixels high. + Width float64 `json:"width,omitempty"` + + // XCoordinate: The X coordinate of the upper left corner of the + // cropping area in the background image. This is a value in the closed + // range of 0 to 1. This value represents the horizontal distance from + // the left side of the entire image to the left side of the cropping + // area divided by the width of the entire image. 
+ XCoordinate float64 `json:"xCoordinate,omitempty"` + + // YCoordinate: The Y coordinate of the upper left corner of the + // cropping area in the background image. This is a value in the closed + // range of 0 to 1. This value represents the vertical distance from the + // top side of the entire image to the top side of the cropping area + // divided by the height of the entire image. + YCoordinate float64 `json:"yCoordinate,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TeamDriveBackgroundImageFile) MarshalJSON() ([]byte, error) { + type NoMethod TeamDriveBackgroundImageFile + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *TeamDriveBackgroundImageFile) UnmarshalJSON(data []byte) error { + type NoMethod TeamDriveBackgroundImageFile + var s1 struct { + Width gensupport.JSONFloat64 `json:"width"` + XCoordinate gensupport.JSONFloat64 `json:"xCoordinate"` + YCoordinate gensupport.JSONFloat64 `json:"yCoordinate"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Width = float64(s1.Width) + s.XCoordinate = float64(s1.XCoordinate) + s.YCoordinate = float64(s1.YCoordinate) + return nil +} + +// TeamDriveCapabilities: Capabilities the current user has on this Team +// Drive. +type TeamDriveCapabilities struct { + // CanAddChildren: Whether the current user can add children to folders + // in this Team Drive. + CanAddChildren bool `json:"canAddChildren,omitempty"` + + // CanChangeCopyRequiresWriterPermissionRestriction: Whether the current + // user can change the copyRequiresWriterPermission restriction of this + // Team Drive. + CanChangeCopyRequiresWriterPermissionRestriction bool `json:"canChangeCopyRequiresWriterPermissionRestriction,omitempty"` + + // CanChangeDomainUsersOnlyRestriction: Whether the current user can + // change the domainUsersOnly restriction of this Team Drive. + CanChangeDomainUsersOnlyRestriction bool `json:"canChangeDomainUsersOnlyRestriction,omitempty"` + + // CanChangeTeamDriveBackground: Whether the current user can change the + // background of this Team Drive. + CanChangeTeamDriveBackground bool `json:"canChangeTeamDriveBackground,omitempty"` + + // CanChangeTeamMembersOnlyRestriction: Whether the current user can + // change the teamMembersOnly restriction of this Team Drive. + CanChangeTeamMembersOnlyRestriction bool `json:"canChangeTeamMembersOnlyRestriction,omitempty"` + + // CanComment: Whether the current user can comment on files in this + // Team Drive. 
+ CanComment bool `json:"canComment,omitempty"` + + // CanCopy: Whether the current user can copy files in this Team Drive. + CanCopy bool `json:"canCopy,omitempty"` + + // CanDeleteChildren: Whether the current user can delete children from + // folders in this Team Drive. + CanDeleteChildren bool `json:"canDeleteChildren,omitempty"` + + // CanDeleteTeamDrive: Whether the current user can delete this Team + // Drive. Attempting to delete the Team Drive may still fail if there + // are untrashed items inside the Team Drive. + CanDeleteTeamDrive bool `json:"canDeleteTeamDrive,omitempty"` + + // CanDownload: Whether the current user can download files in this Team + // Drive. + CanDownload bool `json:"canDownload,omitempty"` + + // CanEdit: Whether the current user can edit files in this Team Drive + CanEdit bool `json:"canEdit,omitempty"` + + // CanListChildren: Whether the current user can list the children of + // folders in this Team Drive. + CanListChildren bool `json:"canListChildren,omitempty"` + + // CanManageMembers: Whether the current user can add members to this + // Team Drive or remove them or change their role. + CanManageMembers bool `json:"canManageMembers,omitempty"` + + // CanReadRevisions: Whether the current user can read the revisions + // resource of files in this Team Drive. + CanReadRevisions bool `json:"canReadRevisions,omitempty"` + + // CanRemoveChildren: Deprecated - use canDeleteChildren or + // canTrashChildren instead. + CanRemoveChildren bool `json:"canRemoveChildren,omitempty"` + + // CanRename: Whether the current user can rename files or folders in + // this Team Drive. + CanRename bool `json:"canRename,omitempty"` + + // CanRenameTeamDrive: Whether the current user can rename this Team + // Drive. + CanRenameTeamDrive bool `json:"canRenameTeamDrive,omitempty"` + + // CanShare: Whether the current user can share files or folders in this + // Team Drive. + CanShare bool `json:"canShare,omitempty"` + + // CanTrashChildren: Whether the current user can trash children from + // folders in this Team Drive. + CanTrashChildren bool `json:"canTrashChildren,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CanAddChildren") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CanAddChildren") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TeamDriveCapabilities) MarshalJSON() ([]byte, error) { + type NoMethod TeamDriveCapabilities + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TeamDriveRestrictions: A set of restrictions that apply to this Team +// Drive or items inside this Team Drive. +type TeamDriveRestrictions struct { + // AdminManagedRestrictions: Whether administrative privileges on this + // Team Drive are required to modify restrictions. 
+ AdminManagedRestrictions bool `json:"adminManagedRestrictions,omitempty"` + + // CopyRequiresWriterPermission: Whether the options to copy, print, or + // download files inside this Team Drive, should be disabled for readers + // and commenters. When this restriction is set to true, it will + // override the similarly named field to true for any file inside this + // Team Drive. + CopyRequiresWriterPermission bool `json:"copyRequiresWriterPermission,omitempty"` + + // DomainUsersOnly: Whether access to this Team Drive and items inside + // this Team Drive is restricted to users of the domain to which this + // Team Drive belongs. This restriction may be overridden by other + // sharing policies controlled outside of this Team Drive. + DomainUsersOnly bool `json:"domainUsersOnly,omitempty"` + + // TeamMembersOnly: Whether access to items inside this Team Drive is + // restricted to members of this Team Drive. + TeamMembersOnly bool `json:"teamMembersOnly,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AdminManagedRestrictions") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdminManagedRestrictions") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TeamDriveRestrictions) MarshalJSON() ([]byte, error) { + type NoMethod TeamDriveRestrictions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TeamDriveList: A list of Team Drives. +type TeamDriveList struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#teamDriveList". + Kind string `json:"kind,omitempty"` + + // NextPageToken: The page token for the next page of Team Drives. This + // will be absent if the end of the Team Drives list has been reached. + // If the token is rejected for any reason, it should be discarded, and + // pagination should be restarted from the first page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // TeamDrives: The list of Team Drives. If nextPageToken is populated, + // then this list may be incomplete and an additional page of results + // should be fetched. + TeamDrives []*TeamDrive `json:"teamDrives,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TeamDriveList) MarshalJSON() ([]byte, error) { + type NoMethod TeamDriveList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // User: Information about a Drive user. @@ -1326,7 +2241,8 @@ type User struct { // visible to the requester. EmailAddress string `json:"emailAddress,omitempty"` - // Kind: This is always drive#user. + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "drive#user". Kind string `json:"kind,omitempty"` // Me: Whether this user is the requesting user. @@ -1345,12 +2261,20 @@ type User struct { // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } func (s *User) MarshalJSON() ([]byte, error) { - type noMethod User - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) + type NoMethod User + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // method id "drive.about.get": @@ -1360,6 +2284,7 @@ type AboutGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Gets information about the user, the user's Drive, and system @@ -1395,21 +2320,35 @@ func (c *AboutGetCall) Context(ctx context.Context) *AboutGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AboutGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *AboutGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "about") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err } - return c.s.client.Do(req) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.about.get" call. 
@@ -1444,7 +2383,8 @@ func (c *AboutGetCall) Do(opts ...googleapi.CallOption) (*About, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -1476,6 +2416,7 @@ type ChangesGetStartPageTokenCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // GetStartPageToken: Gets the starting pageToken for listing future @@ -1485,6 +2426,21 @@ func (r *ChangesService) GetStartPageToken() *ChangesGetStartPageTokenCall { return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. +func (c *ChangesGetStartPageTokenCall) SupportsTeamDrives(supportsTeamDrives bool) *ChangesGetStartPageTokenCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + +// TeamDriveId sets the optional parameter "teamDriveId": The ID of the +// Team Drive for which the starting pageToken for listing future +// changes from that Team Drive will be returned. +func (c *ChangesGetStartPageTokenCall) TeamDriveId(teamDriveId string) *ChangesGetStartPageTokenCall { + c.urlParams_.Set("teamDriveId", teamDriveId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -1511,21 +2467,35 @@ func (c *ChangesGetStartPageTokenCall) Context(ctx context.Context) *ChangesGetS return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChangesGetStartPageTokenCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ChangesGetStartPageTokenCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "changes/startPageToken") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err } - return c.s.client.Do(req) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.changes.getStartPageToken" call. 
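// Sketch of the new Team Drive options on drive.changes.getStartPageToken,
// using the same illustrative package and imports as the sketch above; the
// teamDriveID argument is a placeholder value.
func exampleStartPageToken(ctx context.Context, srv *drive.Service, teamDriveID string) (string, error) {
	call := srv.Changes.GetStartPageToken().
		SupportsTeamDrives(true).
		Context(ctx)
	if teamDriveID != "" {
		// Scope the token to one Team Drive instead of the user's whole corpus.
		call = call.TeamDriveId(teamDriveID)
	}
	tok, err := call.Do()
	if err != nil {
		return "", err
	}
	return tok.StartPageToken, nil
}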
@@ -1560,7 +2530,8 @@ func (c *ChangesGetStartPageTokenCall) Do(opts ...googleapi.CallOption) (*StartP HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -1568,7 +2539,20 @@ func (c *ChangesGetStartPageTokenCall) Do(opts ...googleapi.CallOption) (*StartP // "description": "Gets the starting pageToken for listing future changes.", // "httpMethod": "GET", // "id": "drive.changes.getStartPageToken", - // "path": "changes/startPageToken", + // "parameters": { + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" + // }, + // "teamDriveId": { + // "description": "The ID of the Team Drive for which the starting pageToken for listing future changes from that Team Drive will be returned.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "changes/startPageToken", // "response": { // "$ref": "StartPageToken" // }, @@ -1592,23 +2576,42 @@ type ChangesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Lists changes for a user. +// List: Lists the changes for a user or Team Drive. func (r *ChangesService) List(pageToken string) *ChangesListCall { c := &ChangesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.urlParams_.Set("pageToken", pageToken) return c } +// IncludeCorpusRemovals sets the optional parameter +// "includeCorpusRemovals": Whether changes should include the file +// resource if the file is still accessible by the user at the time of +// the request, even when a file was removed from the list of changes +// and there will be no further change entries for this file. +func (c *ChangesListCall) IncludeCorpusRemovals(includeCorpusRemovals bool) *ChangesListCall { + c.urlParams_.Set("includeCorpusRemovals", fmt.Sprint(includeCorpusRemovals)) + return c +} + // IncludeRemoved sets the optional parameter "includeRemoved": Whether -// to include changes indicating that items have left the view of the -// changes list, for example by deletion or lost access. +// to include changes indicating that items have been removed from the +// list of changes, for example by deletion or loss of access. func (c *ChangesListCall) IncludeRemoved(includeRemoved bool) *ChangesListCall { c.urlParams_.Set("includeRemoved", fmt.Sprint(includeRemoved)) return c } +// IncludeTeamDriveItems sets the optional parameter +// "includeTeamDriveItems": Whether Team Drive files or changes should +// be included in results. +func (c *ChangesListCall) IncludeTeamDriveItems(includeTeamDriveItems bool) *ChangesListCall { + c.urlParams_.Set("includeTeamDriveItems", fmt.Sprint(includeTeamDriveItems)) + return c +} + // PageSize sets the optional parameter "pageSize": The maximum number // of changes to return per page. func (c *ChangesListCall) PageSize(pageSize int64) *ChangesListCall { @@ -1634,6 +2637,22 @@ func (c *ChangesListCall) Spaces(spaces string) *ChangesListCall { return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. 
+func (c *ChangesListCall) SupportsTeamDrives(supportsTeamDrives bool) *ChangesListCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + +// TeamDriveId sets the optional parameter "teamDriveId": The Team Drive +// from which changes will be returned. If specified the change IDs will +// be reflective of the Team Drive; use the combined Team Drive ID and +// change ID as an identifier. +func (c *ChangesListCall) TeamDriveId(teamDriveId string) *ChangesListCall { + c.urlParams_.Set("teamDriveId", teamDriveId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -1660,21 +2679,35 @@ func (c *ChangesListCall) Context(ctx context.Context) *ChangesListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChangesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "changes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err } - return c.s.client.Do(req) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.changes.list" call. 
@@ -1709,21 +2742,34 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangeList, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Lists changes for a user.", + // "description": "Lists the changes for a user or Team Drive.", // "httpMethod": "GET", // "id": "drive.changes.list", // "parameterOrder": [ // "pageToken" // ], // "parameters": { + // "includeCorpusRemovals": { + // "default": "false", + // "description": "Whether changes should include the file resource if the file is still accessible by the user at the time of the request, even when a file was removed from the list of changes and there will be no further change entries for this file.", + // "location": "query", + // "type": "boolean" + // }, // "includeRemoved": { // "default": "true", - // "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", + // "description": "Whether to include changes indicating that items have been removed from the list of changes, for example by deletion or loss of access.", + // "location": "query", + // "type": "boolean" + // }, + // "includeTeamDriveItems": { + // "default": "false", + // "description": "Whether Team Drive files or changes should be included in results.", // "location": "query", // "type": "boolean" // }, @@ -1753,6 +2799,17 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangeList, error) // "description": "A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.", // "location": "query", // "type": "string" + // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" + // }, + // "teamDriveId": { + // "description": "The Team Drive from which changes will be returned. If specified the change IDs will be reflective of the Team Drive; use the combined Team Drive ID and change ID as an identifier.", + // "location": "query", + // "type": "string" // } // }, // "path": "changes", @@ -1780,6 +2837,7 @@ type ChangesWatchCall struct { channel *Channel urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Watch: Subscribes to changes for a user. @@ -1790,14 +2848,32 @@ func (r *ChangesService) Watch(pageToken string, channel *Channel) *ChangesWatch return c } +// IncludeCorpusRemovals sets the optional parameter +// "includeCorpusRemovals": Whether changes should include the file +// resource if the file is still accessible by the user at the time of +// the request, even when a file was removed from the list of changes +// and there will be no further change entries for this file. +func (c *ChangesWatchCall) IncludeCorpusRemovals(includeCorpusRemovals bool) *ChangesWatchCall { + c.urlParams_.Set("includeCorpusRemovals", fmt.Sprint(includeCorpusRemovals)) + return c +} + // IncludeRemoved sets the optional parameter "includeRemoved": Whether -// to include changes indicating that items have left the view of the -// changes list, for example by deletion or lost access. +// to include changes indicating that items have been removed from the +// list of changes, for example by deletion or loss of access. 
func (c *ChangesWatchCall) IncludeRemoved(includeRemoved bool) *ChangesWatchCall { c.urlParams_.Set("includeRemoved", fmt.Sprint(includeRemoved)) return c } +// IncludeTeamDriveItems sets the optional parameter +// "includeTeamDriveItems": Whether Team Drive files or changes should +// be included in results. +func (c *ChangesWatchCall) IncludeTeamDriveItems(includeTeamDriveItems bool) *ChangesWatchCall { + c.urlParams_.Set("includeTeamDriveItems", fmt.Sprint(includeTeamDriveItems)) + return c +} + // PageSize sets the optional parameter "pageSize": The maximum number // of changes to return per page. func (c *ChangesWatchCall) PageSize(pageSize int64) *ChangesWatchCall { @@ -1823,6 +2899,22 @@ func (c *ChangesWatchCall) Spaces(spaces string) *ChangesWatchCall { return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. +func (c *ChangesWatchCall) SupportsTeamDrives(supportsTeamDrives bool) *ChangesWatchCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + +// TeamDriveId sets the optional parameter "teamDriveId": The Team Drive +// from which changes will be returned. If specified the change IDs will +// be reflective of the Team Drive; use the combined Team Drive ID and +// change ID as an identifier. +func (c *ChangesWatchCall) TeamDriveId(teamDriveId string) *ChangesWatchCall { + c.urlParams_.Set("teamDriveId", teamDriveId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -1839,24 +2931,37 @@ func (c *ChangesWatchCall) Context(ctx context.Context) *ChangesWatchCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChangesWatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ChangesWatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "changes/watch") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err } - return c.s.client.Do(req) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.changes.watch" call. 
@@ -1891,7 +2996,8 @@ func (c *ChangesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -1903,9 +3009,21 @@ func (c *ChangesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { // "pageToken" // ], // "parameters": { + // "includeCorpusRemovals": { + // "default": "false", + // "description": "Whether changes should include the file resource if the file is still accessible by the user at the time of the request, even when a file was removed from the list of changes and there will be no further change entries for this file.", + // "location": "query", + // "type": "boolean" + // }, // "includeRemoved": { // "default": "true", - // "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", + // "description": "Whether to include changes indicating that items have been removed from the list of changes, for example by deletion or loss of access.", + // "location": "query", + // "type": "boolean" + // }, + // "includeTeamDriveItems": { + // "default": "false", + // "description": "Whether Team Drive files or changes should be included in results.", // "location": "query", // "type": "boolean" // }, @@ -1935,6 +3053,17 @@ func (c *ChangesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { // "description": "A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.", // "location": "query", // "type": "string" + // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" + // }, + // "teamDriveId": { + // "description": "The Team Drive from which changes will be returned. If specified the change IDs will be reflective of the Team Drive; use the combined Team Drive ID and change ID as an identifier.", + // "location": "query", + // "type": "string" // } // }, // "path": "changes/watch", @@ -1966,6 +3095,7 @@ type ChannelsStopCall struct { channel *Channel urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Stop: Stop watching resources through this channel @@ -1991,24 +3121,37 @@ func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChannelsStopCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err } - return c.s.client.Do(req) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.channels.stop" call. @@ -2053,6 +3196,7 @@ type CommentsCreateCall struct { comment *Comment urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Create: Creates a new comment on a file. @@ -2079,26 +3223,40 @@ func (c *CommentsCreateCall) Context(ctx context.Context) *CommentsCreateCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CommentsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *CommentsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.comments.create" call. @@ -2133,7 +3291,8 @@ func (c *CommentsCreateCall) Do(opts ...googleapi.CallOption) (*Comment, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2175,6 +3334,7 @@ type CommentsDeleteCall struct { commentId string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes a comment. @@ -2201,21 +3361,36 @@ func (c *CommentsDeleteCall) Context(ctx context.Context) *CommentsDeleteCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CommentsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *CommentsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "commentId": c.commentId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.comments.delete" call. @@ -2270,6 +3445,7 @@ type CommentsGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Gets a comment by ID. @@ -2314,24 +3490,39 @@ func (c *CommentsGetCall) Context(ctx context.Context) *CommentsGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CommentsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "commentId": c.commentId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.comments.get" call. @@ -2366,7 +3557,8 @@ func (c *CommentsGetCall) Do(opts ...googleapi.CallOption) (*Comment, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2419,6 +3611,7 @@ type CommentsListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Lists a file's comments. @@ -2485,23 +3678,38 @@ func (c *CommentsListCall) Context(ctx context.Context) *CommentsListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CommentsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.comments.list" call. @@ -2536,7 +3744,8 @@ func (c *CommentsListCall) Do(opts ...googleapi.CallOption) (*CommentList, error HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2623,6 +3832,7 @@ type CommentsUpdateCall struct { comment *Comment urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Update: Updates a comment with patch semantics. @@ -2650,27 +3860,41 @@ func (c *CommentsUpdateCall) Context(ctx context.Context) *CommentsUpdateCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CommentsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *CommentsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "commentId": c.commentId, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.comments.update" call. @@ -2705,7 +3929,8 @@ func (c *CommentsUpdateCall) Do(opts ...googleapi.CallOption) (*Comment, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2754,6 +3979,7 @@ type FilesCopyCall struct { file *File urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Copy: Creates a copy of a file and applies any requested updates with @@ -2792,6 +4018,13 @@ func (c *FilesCopyCall) OcrLanguage(ocrLanguage string) *FilesCopyCall { return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. 
+func (c *FilesCopyCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesCopyCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -2808,26 +4041,40 @@ func (c *FilesCopyCall) Context(ctx context.Context) *FilesCopyCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FilesCopyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FilesCopyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/copy") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.files.copy" call. @@ -2862,7 +4109,8 @@ func (c *FilesCopyCall) Do(opts ...googleapi.CallOption) (*File, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2896,6 +4144,12 @@ func (c *FilesCopyCall) Do(opts ...googleapi.CallOption) (*File, error) { // "description": "A language hint for OCR processing during image import (ISO 639-1 code).", // "location": "query", // "type": "string" + // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "files/{fileId}/copy", @@ -2918,15 +4172,12 @@ func (c *FilesCopyCall) Do(opts ...googleapi.CallOption) (*File, error) { // method id "drive.files.create": type FilesCreateCall struct { - s *Service - file *File - urlParams_ gensupport.URLParams - media_ io.Reader - resumableBuffer_ *gensupport.ResumableBuffer - mediaType_ string - mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. - progressUpdater_ googleapi.ProgressUpdater - ctx_ context.Context + s *Service + file *File + urlParams_ gensupport.URLParams + mediaInfo_ *gensupport.MediaInfo + ctx_ context.Context + header_ http.Header } // Create: Creates a new file. @@ -2963,6 +4214,13 @@ func (c *FilesCreateCall) OcrLanguage(ocrLanguage string) *FilesCreateCall { return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. 
+func (c *FilesCreateCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesCreateCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + // UseContentAsIndexableText sets the optional parameter // "useContentAsIndexableText": Whether to use the uploaded content as // indexable text. @@ -2980,12 +4238,7 @@ func (c *FilesCreateCall) UseContentAsIndexableText(useContentAsIndexableText bo // supplied. // At most one of Media and ResumableMedia may be set. func (c *FilesCreateCall) Media(r io.Reader, options ...googleapi.MediaOption) *FilesCreateCall { - opts := googleapi.ProcessMediaOptions(options) - chunkSize := opts.ChunkSize - if !opts.ForceEmptyContentType { - r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) - } - c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize) + c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options) return c } @@ -3000,11 +4253,7 @@ func (c *FilesCreateCall) Media(r io.Reader, options ...googleapi.MediaOption) * // supersede any context previously provided to the Context method. func (c *FilesCreateCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *FilesCreateCall { c.ctx_ = ctx - rdr := gensupport.ReaderAtToReader(r, size) - rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) - c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize) - c.media_ = nil - c.mediaSize_ = size + c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) return c } @@ -3013,7 +4262,7 @@ func (c *FilesCreateCall) ResumableMedia(ctx context.Context, r io.ReaderAt, siz // not slow down the upload operation. This should only be called when // using ResumableMedia (as opposed to Media). func (c *FilesCreateCall) ProgressUpdater(pu googleapi.ProgressUpdater) *FilesCreateCall { - c.progressUpdater_ = pu + c.mediaInfo_.SetProgressUpdater(pu) return c } @@ -3035,41 +4284,48 @@ func (c *FilesCreateCall) Context(ctx context.Context) *FilesCreateCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FilesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FilesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files") - if c.media_ != nil || c.resumableBuffer_ != nil { + if c.mediaInfo_ != nil { urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) - protocol := "multipart" - if c.resumableBuffer_ != nil { - protocol = "resumable" - } - c.urlParams_.Set("uploadType", protocol) - } - urls += "?" 
+ c.urlParams_.Encode() - if c.media_ != nil { - var combined io.ReadCloser - combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_) - defer combined.Close() - body = combined + c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) } - req, _ := http.NewRequest("POST", urls, body) - googleapi.SetOpaque(req.URL) - if c.resumableBuffer_ != nil && c.mediaType_ != "" { - req.Header.Set("X-Upload-Content-Type", c.mediaType_) + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") } - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) + body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) + defer cleanup() + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err } - return c.s.client.Do(req) + req.Header = reqHeaders + gensupport.SetGetBody(req, getBody) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.files.create" call. @@ -3098,20 +4354,10 @@ func (c *FilesCreateCall) Do(opts ...googleapi.CallOption) (*File, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - if c.resumableBuffer_ != nil { - loc := res.Header.Get("Location") - rx := &gensupport.ResumableUpload{ - Client: c.s.client, - UserAgent: c.s.userAgent(), - URI: loc, - Media: c.resumableBuffer_, - MediaType: c.mediaType_, - Callback: func(curr int64) { - if c.progressUpdater_ != nil { - c.progressUpdater_(curr, c.mediaSize_) - } - }, - } + rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) + if rx != nil { + rx.Client = c.s.client + rx.UserAgent = c.s.userAgent() ctx := c.ctx_ if ctx == nil { ctx = context.TODO() @@ -3131,7 +4377,8 @@ func (c *FilesCreateCall) Do(opts ...googleapi.CallOption) (*File, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3173,6 +4420,12 @@ func (c *FilesCreateCall) Do(opts ...googleapi.CallOption) (*File, error) { // "location": "query", // "type": "string" // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" + // }, // "useContentAsIndexableText": { // "default": "false", // "description": "Whether to use the uploaded content as indexable text.", @@ -3205,17 +4458,26 @@ type FilesDeleteCall struct { fileId string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Permanently deletes a file owned by the user without moving -// it to the trash. If the target is a folder, all descendants owned by -// the user are also deleted. +// it to the trash. If the file belongs to a Team Drive the user must be +// an organizer on the parent. If the target is a folder, all +// descendants owned by the user are also deleted. func (r *FilesService) Delete(fileId string) *FilesDeleteCall { c := &FilesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.fileId = fileId return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. 
+func (c *FilesDeleteCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesDeleteCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -3232,20 +4494,35 @@ func (c *FilesDeleteCall) Context(ctx context.Context) *FilesDeleteCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FilesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FilesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.files.delete" call. @@ -3261,7 +4538,7 @@ func (c *FilesDeleteCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Permanently deletes a file owned by the user without moving it to the trash. If the target is a folder, all descendants owned by the user are also deleted.", + // "description": "Permanently deletes a file owned by the user without moving it to the trash. If the file belongs to a Team Drive the user must be an organizer on the parent. If the target is a folder, all descendants owned by the user are also deleted.", // "httpMethod": "DELETE", // "id": "drive.files.delete", // "parameterOrder": [ @@ -3273,6 +4550,12 @@ func (c *FilesDeleteCall) Do(opts ...googleapi.CallOption) error { // "location": "path", // "required": true, // "type": "string" + // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "files/{fileId}", @@ -3291,6 +4574,7 @@ type FilesEmptyTrashCall struct { s *Service urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // EmptyTrash: Permanently deletes all of the user's trashed files. @@ -3315,18 +4599,32 @@ func (c *FilesEmptyTrashCall) Context(ctx context.Context) *FilesEmptyTrashCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FilesEmptyTrashCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FilesEmptyTrashCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/trash") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err } - return c.s.client.Do(req) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.files.emptyTrash" call. @@ -3361,10 +4659,12 @@ type FilesExportCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Export: Exports a Google Doc to the requested MIME type and returns -// the exported content. +// the exported content. Please note that the exported content is +// limited to 10MB. func (r *FilesService) Export(fileId string, mimeType string) *FilesExportCall { c := &FilesExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.fileId = fileId @@ -3398,23 +4698,38 @@ func (c *FilesExportCall) Context(ctx context.Context) *FilesExportCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FilesExportCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FilesExportCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/export") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -3446,7 +4761,7 @@ func (c *FilesExportCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Exports a Google Doc to the requested MIME type and returns the exported content.", + // "description": "Exports a Google Doc to the requested MIME type and returns the exported content. Please note that the exported content is limited to 10MB.", // "httpMethod": "GET", // "id": "drive.files.export", // "parameterOrder": [ @@ -3485,6 +4800,7 @@ type FilesGenerateIdsCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // GenerateIds: Generates a set of file IDs which can be provided in @@ -3535,21 +4851,35 @@ func (c *FilesGenerateIdsCall) Context(ctx context.Context) *FilesGenerateIdsCal return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *FilesGenerateIdsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FilesGenerateIdsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/generateIds") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err } - return c.s.client.Do(req) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.files.generateIds" call. @@ -3584,7 +4914,8 @@ func (c *FilesGenerateIdsCall) Do(opts ...googleapi.CallOption) (*GeneratedIds, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3630,6 +4961,7 @@ type FilesGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Gets a file's metadata or content by ID. @@ -3648,6 +4980,13 @@ func (c *FilesGetCall) AcknowledgeAbuse(acknowledgeAbuse bool) *FilesGetCall { return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. +func (c *FilesGetCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesGetCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -3674,23 +5013,38 @@ func (c *FilesGetCall) Context(ctx context.Context) *FilesGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FilesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FilesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -3741,7 +5095,8 @@ func (c *FilesGetCall) Do(opts ...googleapi.CallOption) (*File, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3764,6 +5119,12 @@ func (c *FilesGetCall) Do(opts ...googleapi.CallOption) (*File, error) { // "location": "path", // "required": true, // "type": "string" + // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "files/{fileId}", @@ -3793,6 +5154,7 @@ type FilesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Lists or searches files. @@ -3801,33 +5163,53 @@ func (r *FilesService) List() *FilesListCall { return c } +// Corpora sets the optional parameter "corpora": Comma-separated list +// of bodies of items (files/documents) to which the query applies. +// Supported bodies are 'user', 'domain', 'teamDrive' and +// 'allTeamDrives'. 'allTeamDrives' must be combined with 'user'; all +// other values must be used in isolation. Prefer 'user' or 'teamDrive' +// to 'allTeamDrives' for efficiency. +func (c *FilesListCall) Corpora(corpora string) *FilesListCall { + c.urlParams_.Set("corpora", corpora) + return c +} + // Corpus sets the optional parameter "corpus": The source of files to -// list. +// list. Deprecated: use 'corpora' instead. // // Possible values: // "domain" - Files shared to the user's domain. -// "user" (default) - Files owned by or shared to the user. +// "user" - Files owned by or shared to the user. func (c *FilesListCall) Corpus(corpus string) *FilesListCall { c.urlParams_.Set("corpus", corpus) return c } +// IncludeTeamDriveItems sets the optional parameter +// "includeTeamDriveItems": Whether Team Drive items should be included +// in results. +func (c *FilesListCall) IncludeTeamDriveItems(includeTeamDriveItems bool) *FilesListCall { + c.urlParams_.Set("includeTeamDriveItems", fmt.Sprint(includeTeamDriveItems)) + return c +} + // OrderBy sets the optional parameter "orderBy": A comma-separated list // of sort keys. Valid keys are 'createdTime', 'folder', -// 'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', -// 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each -// key sorts ascending by default, but may be reversed with the 'desc' -// modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. -// Please note that there is a current limitation for users with -// approximately one million files in which the requested sort order is -// ignored. +// 'modifiedByMeTime', 'modifiedTime', 'name', 'name_natural', +// 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and +// 'viewedByMeTime'. 
Each key sorts ascending by default, but may be +// reversed with the 'desc' modifier. Example usage: +// ?orderBy=folder,modifiedTime desc,name. Please note that there is a +// current limitation for users with approximately one million files in +// which the requested sort order is ignored. func (c *FilesListCall) OrderBy(orderBy string) *FilesListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageSize sets the optional parameter "pageSize": The maximum number -// of files to return per page. +// of files to return per page. Partial or empty result pages are +// possible even before the end of the files list has been reached. func (c *FilesListCall) PageSize(pageSize int64) *FilesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -3856,6 +5238,20 @@ func (c *FilesListCall) Spaces(spaces string) *FilesListCall { return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. +func (c *FilesListCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesListCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + +// TeamDriveId sets the optional parameter "teamDriveId": ID of Team +// Drive to search. +func (c *FilesListCall) TeamDriveId(teamDriveId string) *FilesListCall { + c.urlParams_.Set("teamDriveId", teamDriveId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -3882,21 +5278,35 @@ func (c *FilesListCall) Context(ctx context.Context) *FilesListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FilesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FilesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err } - return c.s.client.Do(req) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.files.list" call. @@ -3931,7 +5341,8 @@ func (c *FilesListCall) Do(opts ...googleapi.CallOption) (*FileList, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3940,9 +5351,13 @@ func (c *FilesListCall) Do(opts ...googleapi.CallOption) (*FileList, error) { // "httpMethod": "GET", // "id": "drive.files.list", // "parameters": { + // "corpora": { + // "description": "Comma-separated list of bodies of items (files/documents) to which the query applies. 
Supported bodies are 'user', 'domain', 'teamDrive' and 'allTeamDrives'. 'allTeamDrives' must be combined with 'user'; all other values must be used in isolation. Prefer 'user' or 'teamDrive' to 'allTeamDrives' for efficiency.", + // "location": "query", + // "type": "string" + // }, // "corpus": { - // "default": "user", - // "description": "The source of files to list.", + // "description": "The source of files to list. Deprecated: use 'corpora' instead.", // "enum": [ // "domain", // "user" @@ -3954,14 +5369,20 @@ func (c *FilesListCall) Do(opts ...googleapi.CallOption) (*FileList, error) { // "location": "query", // "type": "string" // }, + // "includeTeamDriveItems": { + // "default": "false", + // "description": "Whether Team Drive items should be included in results.", + // "location": "query", + // "type": "boolean" + // }, // "orderBy": { - // "description": "A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one million files in which the requested sort order is ignored.", + // "description": "A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'name_natural', 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one million files in which the requested sort order is ignored.", // "location": "query", // "type": "string" // }, // "pageSize": { // "default": "100", - // "description": "The maximum number of files to return per page.", + // "description": "The maximum number of files to return per page. Partial or empty result pages are possible even before the end of the files list has been reached.", // "format": "int32", // "location": "query", // "maximum": "1000", @@ -3983,6 +5404,17 @@ func (c *FilesListCall) Do(opts ...googleapi.CallOption) (*FileList, error) { // "description": "A comma-separated list of spaces to query within the corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.", // "location": "query", // "type": "string" + // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" + // }, + // "teamDriveId": { + // "description": "ID of Team Drive to search.", + // "location": "query", + // "type": "string" // } // }, // "path": "files", @@ -4026,16 +5458,13 @@ func (c *FilesListCall) Pages(ctx context.Context, f func(*FileList) error) erro // method id "drive.files.update": type FilesUpdateCall struct { - s *Service - fileId string - file *File - urlParams_ gensupport.URLParams - media_ io.Reader - resumableBuffer_ *gensupport.ResumableBuffer - mediaType_ string - mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. 
- progressUpdater_ googleapi.ProgressUpdater - ctx_ context.Context + s *Service + fileId string + file *File + urlParams_ gensupport.URLParams + mediaInfo_ *gensupport.MediaInfo + ctx_ context.Context + header_ http.Header } // Update: Updates a file's metadata and/or content with patch @@ -4077,6 +5506,13 @@ func (c *FilesUpdateCall) RemoveParents(removeParents string) *FilesUpdateCall { return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. +func (c *FilesUpdateCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesUpdateCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + // UseContentAsIndexableText sets the optional parameter // "useContentAsIndexableText": Whether to use the uploaded content as // indexable text. @@ -4094,12 +5530,7 @@ func (c *FilesUpdateCall) UseContentAsIndexableText(useContentAsIndexableText bo // supplied. // At most one of Media and ResumableMedia may be set. func (c *FilesUpdateCall) Media(r io.Reader, options ...googleapi.MediaOption) *FilesUpdateCall { - opts := googleapi.ProcessMediaOptions(options) - chunkSize := opts.ChunkSize - if !opts.ForceEmptyContentType { - r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) - } - c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize) + c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options) return c } @@ -4114,11 +5545,7 @@ func (c *FilesUpdateCall) Media(r io.Reader, options ...googleapi.MediaOption) * // supersede any context previously provided to the Context method. func (c *FilesUpdateCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *FilesUpdateCall { c.ctx_ = ctx - rdr := gensupport.ReaderAtToReader(r, size) - rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) - c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize) - c.media_ = nil - c.mediaSize_ = size + c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) return c } @@ -4127,7 +5554,7 @@ func (c *FilesUpdateCall) ResumableMedia(ctx context.Context, r io.ReaderAt, siz // not slow down the upload operation. This should only be called when // using ResumableMedia (as opposed to Media). func (c *FilesUpdateCall) ProgressUpdater(pu googleapi.ProgressUpdater) *FilesUpdateCall { - c.progressUpdater_ = pu + c.mediaInfo_.SetProgressUpdater(pu) return c } @@ -4149,43 +5576,51 @@ func (c *FilesUpdateCall) Context(ctx context.Context) *FilesUpdateCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *FilesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FilesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}") - if c.media_ != nil || c.resumableBuffer_ != nil { + if c.mediaInfo_ != nil { urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) - protocol := "multipart" - if c.resumableBuffer_ != nil { - protocol = "resumable" - } - c.urlParams_.Set("uploadType", protocol) + c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) } + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") + } + body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) + defer cleanup() urls += "?" + c.urlParams_.Encode() - if c.media_ != nil { - var combined io.ReadCloser - combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_) - defer combined.Close() - body = combined + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err } - req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + gensupport.SetGetBody(req, getBody) googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, }) - if c.resumableBuffer_ != nil && c.mediaType_ != "" { - req.Header.Set("X-Upload-Content-Type", c.mediaType_) - } - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.files.update" call. 
@@ -4214,20 +5649,10 @@ func (c *FilesUpdateCall) Do(opts ...googleapi.CallOption) (*File, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - if c.resumableBuffer_ != nil { - loc := res.Header.Get("Location") - rx := &gensupport.ResumableUpload{ - Client: c.s.client, - UserAgent: c.s.userAgent(), - URI: loc, - Media: c.resumableBuffer_, - MediaType: c.mediaType_, - Callback: func(curr int64) { - if c.progressUpdater_ != nil { - c.progressUpdater_(curr, c.mediaSize_) - } - }, - } + rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) + if rx != nil { + rx.Client = c.s.client + rx.UserAgent = c.s.userAgent() ctx := c.ctx_ if ctx == nil { ctx = context.TODO() @@ -4247,7 +5672,8 @@ func (c *FilesUpdateCall) Do(opts ...googleapi.CallOption) (*File, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4302,6 +5728,12 @@ func (c *FilesUpdateCall) Do(opts ...googleapi.CallOption) (*File, error) { // "location": "query", // "type": "string" // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" + // }, // "useContentAsIndexableText": { // "default": "false", // "description": "Whether to use the uploaded content as indexable text.", @@ -4336,6 +5768,7 @@ type FilesWatchCall struct { channel *Channel urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Watch: Subscribes to changes to a file @@ -4355,6 +5788,13 @@ func (c *FilesWatchCall) AcknowledgeAbuse(acknowledgeAbuse bool) *FilesWatchCall return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. +func (c *FilesWatchCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesWatchCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -4371,26 +5811,40 @@ func (c *FilesWatchCall) Context(ctx context.Context) *FilesWatchCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FilesWatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *FilesWatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/watch") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -4441,7 +5895,8 @@ func (c *FilesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4464,6 +5919,12 @@ func (c *FilesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { // "location": "path", // "required": true, // "type": "string" + // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "files/{fileId}/watch", @@ -4498,9 +5959,10 @@ type PermissionsCreateCall struct { permission *Permission urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } -// Create: Creates a permission for a file. +// Create: Creates a permission for a file or Team Drive. func (r *PermissionsService) Create(fileId string, permission *Permission) *PermissionsCreateCall { c := &PermissionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.fileId = fileId @@ -4508,8 +5970,8 @@ func (r *PermissionsService) Create(fileId string, permission *Permission) *Perm return c } -// EmailMessage sets the optional parameter "emailMessage": A custom -// message to include in the notification email. +// EmailMessage sets the optional parameter "emailMessage": A plain text +// custom message to include in the notification email. func (c *PermissionsCreateCall) EmailMessage(emailMessage string) *PermissionsCreateCall { c.urlParams_.Set("emailMessage", emailMessage) return c @@ -4525,6 +5987,13 @@ func (c *PermissionsCreateCall) SendNotificationEmail(sendNotificationEmail bool return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. +func (c *PermissionsCreateCall) SupportsTeamDrives(supportsTeamDrives bool) *PermissionsCreateCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + // TransferOwnership sets the optional parameter "transferOwnership": // Whether to transfer ownership to the specified user and downgrade the // current owner to a writer. This parameter is required as an @@ -4534,6 +6003,15 @@ func (c *PermissionsCreateCall) TransferOwnership(transferOwnership bool) *Permi return c } +// UseDomainAdminAccess sets the optional parameter +// "useDomainAdminAccess": Issue the request as a domain administrator; +// if set to true, then the requester will be granted access if they are +// an administrator of the domain to which the item belongs. +func (c *PermissionsCreateCall) UseDomainAdminAccess(useDomainAdminAccess bool) *PermissionsCreateCall { + c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess)) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -4550,26 +6028,40 @@ func (c *PermissionsCreateCall) Context(ctx context.Context) *PermissionsCreateC return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PermissionsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *PermissionsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.permissions.create" call. @@ -4604,12 +6096,13 @@ func (c *PermissionsCreateCall) Do(opts ...googleapi.CallOption) (*Permission, e HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Creates a permission for a file.", + // "description": "Creates a permission for a file or Team Drive.", // "httpMethod": "POST", // "id": "drive.permissions.create", // "parameterOrder": [ @@ -4617,12 +6110,12 @@ func (c *PermissionsCreateCall) Do(opts ...googleapi.CallOption) (*Permission, e // ], // "parameters": { // "emailMessage": { - // "description": "A custom message to include in the notification email.", + // "description": "A plain text custom message to include in the notification email.", // "location": "query", // "type": "string" // }, // "fileId": { - // "description": "The ID of the file.", + // "description": "The ID of the file or Team Drive.", // "location": "path", // "required": true, // "type": "string" @@ -4632,11 +6125,23 @@ func (c *PermissionsCreateCall) Do(opts ...googleapi.CallOption) (*Permission, e // "location": "query", // "type": "boolean" // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" + // }, // "transferOwnership": { // "default": "false", // "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. 
This parameter is required as an acknowledgement of the side effect.", // "location": "query", // "type": "boolean" + // }, + // "useDomainAdminAccess": { + // "default": "false", + // "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the item belongs.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "files/{fileId}/permissions", @@ -4662,6 +6167,7 @@ type PermissionsDeleteCall struct { permissionId string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes a permission. @@ -4672,6 +6178,22 @@ func (r *PermissionsService) Delete(fileId string, permissionId string) *Permiss return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. +func (c *PermissionsDeleteCall) SupportsTeamDrives(supportsTeamDrives bool) *PermissionsDeleteCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + +// UseDomainAdminAccess sets the optional parameter +// "useDomainAdminAccess": Issue the request as a domain administrator; +// if set to true, then the requester will be granted access if they are +// an administrator of the domain to which the item belongs. +func (c *PermissionsDeleteCall) UseDomainAdminAccess(useDomainAdminAccess bool) *PermissionsDeleteCall { + c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -4688,21 +6210,36 @@ func (c *PermissionsDeleteCall) Context(ctx context.Context) *PermissionsDeleteC return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PermissionsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *PermissionsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "permissionId": c.permissionId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.permissions.delete" call. 
@@ -4727,7 +6264,7 @@ func (c *PermissionsDeleteCall) Do(opts ...googleapi.CallOption) error { // ], // "parameters": { // "fileId": { - // "description": "The ID of the file.", + // "description": "The ID of the file or Team Drive.", // "location": "path", // "required": true, // "type": "string" @@ -4737,6 +6274,18 @@ func (c *PermissionsDeleteCall) Do(opts ...googleapi.CallOption) error { // "location": "path", // "required": true, // "type": "string" + // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" + // }, + // "useDomainAdminAccess": { + // "default": "false", + // "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the item belongs.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "files/{fileId}/permissions/{permissionId}", @@ -4757,6 +6306,7 @@ type PermissionsGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Gets a permission by ID. @@ -4767,6 +6317,22 @@ func (r *PermissionsService) Get(fileId string, permissionId string) *Permission return c } +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. +func (c *PermissionsGetCall) SupportsTeamDrives(supportsTeamDrives bool) *PermissionsGetCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + +// UseDomainAdminAccess sets the optional parameter +// "useDomainAdminAccess": Issue the request as a domain administrator; +// if set to true, then the requester will be granted access if they are +// an administrator of the domain to which the item belongs. +func (c *PermissionsGetCall) UseDomainAdminAccess(useDomainAdminAccess bool) *PermissionsGetCall { + c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -4793,24 +6359,39 @@ func (c *PermissionsGetCall) Context(ctx context.Context) *PermissionsGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PermissionsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *PermissionsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "permissionId": c.permissionId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.permissions.get" call. @@ -4845,7 +6426,8 @@ func (c *PermissionsGetCall) Do(opts ...googleapi.CallOption) (*Permission, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4869,6 +6451,18 @@ func (c *PermissionsGetCall) Do(opts ...googleapi.CallOption) (*Permission, erro // "location": "path", // "required": true, // "type": "string" + // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" + // }, + // "useDomainAdminAccess": { + // "default": "false", + // "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the item belongs.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "files/{fileId}/permissions/{permissionId}", @@ -4895,15 +6489,49 @@ type PermissionsListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } -// List: Lists a file's permissions. +// List: Lists a file's or Team Drive's permissions. func (r *PermissionsService) List(fileId string) *PermissionsListCall { c := &PermissionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.fileId = fileId return c } +// PageSize sets the optional parameter "pageSize": The maximum number +// of permissions to return per page. When not set for files in a Team +// Drive, at most 100 results will be returned. When not set for files +// that are not in a Team Drive, the entire list will be returned. +func (c *PermissionsListCall) PageSize(pageSize int64) *PermissionsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The token for +// continuing a previous list request on the next page. This should be +// set to the value of 'nextPageToken' from the previous response. +func (c *PermissionsListCall) PageToken(pageToken string) *PermissionsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. +func (c *PermissionsListCall) SupportsTeamDrives(supportsTeamDrives bool) *PermissionsListCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + +// UseDomainAdminAccess sets the optional parameter +// "useDomainAdminAccess": Issue the request as a domain administrator; +// if set to true, then the requester will be granted access if they are +// an administrator of the domain to which the item belongs. 
+func (c *PermissionsListCall) UseDomainAdminAccess(useDomainAdminAccess bool) *PermissionsListCall { + c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -4930,23 +6558,38 @@ func (c *PermissionsListCall) Context(ctx context.Context) *PermissionsListCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PermissionsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *PermissionsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.permissions.list" call. @@ -4981,12 +6624,13 @@ func (c *PermissionsListCall) Do(opts ...googleapi.CallOption) (*PermissionList, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Lists a file's permissions.", + // "description": "Lists a file's or Team Drive's permissions.", // "httpMethod": "GET", // "id": "drive.permissions.list", // "parameterOrder": [ @@ -4994,10 +6638,35 @@ func (c *PermissionsListCall) Do(opts ...googleapi.CallOption) (*PermissionList, // ], // "parameters": { // "fileId": { - // "description": "The ID of the file.", + // "description": "The ID of the file or Team Drive.", // "location": "path", // "required": true, // "type": "string" + // }, + // "pageSize": { + // "description": "The maximum number of permissions to return per page. When not set for files in a Team Drive, at most 100 results will be returned. When not set for files that are not in a Team Drive, the entire list will be returned.", + // "format": "int32", + // "location": "query", + // "maximum": "100", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The token for continuing a previous list request on the next page. 
This should be set to the value of 'nextPageToken' from the previous response.", + // "location": "query", + // "type": "string" + // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" + // }, + // "useDomainAdminAccess": { + // "default": "false", + // "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the item belongs.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "files/{fileId}/permissions", @@ -5016,6 +6685,27 @@ func (c *PermissionsListCall) Do(opts ...googleapi.CallOption) (*PermissionList, } +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *PermissionsListCall) Pages(ctx context.Context, f func(*PermissionList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "drive.permissions.update": type PermissionsUpdateCall struct { @@ -5025,6 +6715,7 @@ type PermissionsUpdateCall struct { permission *Permission urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Update: Updates a permission with patch semantics. @@ -5036,6 +6727,20 @@ func (r *PermissionsService) Update(fileId string, permissionId string, permissi return c } +// RemoveExpiration sets the optional parameter "removeExpiration": +// Whether to remove the expiration date. +func (c *PermissionsUpdateCall) RemoveExpiration(removeExpiration bool) *PermissionsUpdateCall { + c.urlParams_.Set("removeExpiration", fmt.Sprint(removeExpiration)) + return c +} + +// SupportsTeamDrives sets the optional parameter "supportsTeamDrives": +// Whether the requesting application supports Team Drives. +func (c *PermissionsUpdateCall) SupportsTeamDrives(supportsTeamDrives bool) *PermissionsUpdateCall { + c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives)) + return c +} + // TransferOwnership sets the optional parameter "transferOwnership": // Whether to transfer ownership to the specified user and downgrade the // current owner to a writer. This parameter is required as an @@ -5045,6 +6750,15 @@ func (c *PermissionsUpdateCall) TransferOwnership(transferOwnership bool) *Permi return c } +// UseDomainAdminAccess sets the optional parameter +// "useDomainAdminAccess": Issue the request as a domain administrator; +// if set to true, then the requester will be granted access if they are +// an administrator of the domain to which the item belongs. +func (c *PermissionsUpdateCall) UseDomainAdminAccess(useDomainAdminAccess bool) *PermissionsUpdateCall { + c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -5061,27 +6775,41 @@ func (c *PermissionsUpdateCall) Context(ctx context.Context) *PermissionsUpdateC return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *PermissionsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *PermissionsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "permissionId": c.permissionId, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.permissions.update" call. @@ -5116,7 +6844,8 @@ func (c *PermissionsUpdateCall) Do(opts ...googleapi.CallOption) (*Permission, e HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5130,7 +6859,7 @@ func (c *PermissionsUpdateCall) Do(opts ...googleapi.CallOption) (*Permission, e // ], // "parameters": { // "fileId": { - // "description": "The ID of the file.", + // "description": "The ID of the file or Team Drive.", // "location": "path", // "required": true, // "type": "string" @@ -5141,11 +6870,29 @@ func (c *PermissionsUpdateCall) Do(opts ...googleapi.CallOption) (*Permission, e // "required": true, // "type": "string" // }, + // "removeExpiration": { + // "default": "false", + // "description": "Whether to remove the expiration date.", + // "location": "query", + // "type": "boolean" + // }, + // "supportsTeamDrives": { + // "default": "false", + // "description": "Whether the requesting application supports Team Drives.", + // "location": "query", + // "type": "boolean" + // }, // "transferOwnership": { // "default": "false", // "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. This parameter is required as an acknowledgement of the side effect.", // "location": "query", // "type": "boolean" + // }, + // "useDomainAdminAccess": { + // "default": "false", + // "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the item belongs.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "files/{fileId}/permissions/{permissionId}", @@ -5172,6 +6919,7 @@ type RepliesCreateCall struct { reply *Reply urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Create: Creates a new reply to a comment. @@ -5199,27 +6947,41 @@ func (c *RepliesCreateCall) Context(ctx context.Context) *RepliesCreateCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RepliesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *RepliesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reply) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "commentId": c.commentId, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.replies.create" call. @@ -5254,7 +7016,8 @@ func (c *RepliesCreateCall) Do(opts ...googleapi.CallOption) (*Reply, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5304,6 +7067,7 @@ type RepliesDeleteCall struct { replyId string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Deletes a reply. @@ -5331,22 +7095,37 @@ func (c *RepliesDeleteCall) Context(ctx context.Context) *RepliesDeleteCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RepliesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *RepliesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "commentId": c.commentId, "replyId": c.replyId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.replies.delete" call. @@ -5409,6 +7188,7 @@ type RepliesGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Gets a reply by ID. @@ -5454,25 +7234,40 @@ func (c *RepliesGetCall) Context(ctx context.Context) *RepliesGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RepliesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *RepliesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "commentId": c.commentId, "replyId": c.replyId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.replies.get" call. @@ -5507,7 +7302,8 @@ func (c *RepliesGetCall) Do(opts ...googleapi.CallOption) (*Reply, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5568,6 +7364,7 @@ type RepliesListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Lists a comment's replies. @@ -5627,24 +7424,39 @@ func (c *RepliesListCall) Context(ctx context.Context) *RepliesListCall { return c } -func (c *RepliesListCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RepliesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RepliesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "commentId": c.commentId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.replies.list" call. 
@@ -5679,7 +7491,8 @@ func (c *RepliesListCall) Do(opts ...googleapi.CallOption) (*ReplyList, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5769,6 +7582,7 @@ type RepliesUpdateCall struct { reply *Reply urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Update: Updates a reply with patch semantics. @@ -5797,28 +7611,42 @@ func (c *RepliesUpdateCall) Context(ctx context.Context) *RepliesUpdateCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RepliesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *RepliesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.reply) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "commentId": c.commentId, "replyId": c.replyId, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.replies.update" call. @@ -5853,7 +7681,8 @@ func (c *RepliesUpdateCall) Do(opts ...googleapi.CallOption) (*Reply, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5909,6 +7738,7 @@ type RevisionsDeleteCall struct { revisionId string urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Delete: Permanently deletes a revision. This method is only @@ -5936,21 +7766,36 @@ func (c *RevisionsDeleteCall) Context(ctx context.Context) *RevisionsDeleteCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RevisionsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *RevisionsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "revisionId": c.revisionId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.revisions.delete" call. @@ -6006,6 +7851,7 @@ type RevisionsGetCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // Get: Gets a revision's metadata or content by ID. @@ -6051,24 +7897,39 @@ func (c *RevisionsGetCall) Context(ctx context.Context) *RevisionsGetCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RevisionsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *RevisionsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "revisionId": c.revisionId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -6119,7 +7980,8 @@ func (c *RevisionsGetCall) Do(opts ...googleapi.CallOption) (*Revision, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -6178,6 +8040,7 @@ type RevisionsListCall struct { urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context + header_ http.Header } // List: Lists a file's revisions. @@ -6187,6 +8050,21 @@ func (r *RevisionsService) List(fileId string) *RevisionsListCall { return c } +// PageSize sets the optional parameter "pageSize": The maximum number +// of revisions to return per page. +func (c *RevisionsListCall) PageSize(pageSize int64) *RevisionsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The token for +// continuing a previous list request on the next page. This should be +// set to the value of 'nextPageToken' from the previous response. +func (c *RevisionsListCall) PageToken(pageToken string) *RevisionsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -6213,23 +8091,38 @@ func (c *RevisionsListCall) Context(ctx context.Context) *RevisionsListCall { return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RevisionsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *RevisionsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.revisions.list" call. @@ -6264,7 +8157,8 @@ func (c *RevisionsListCall) Do(opts ...googleapi.CallOption) (*RevisionList, err HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -6281,6 +8175,20 @@ func (c *RevisionsListCall) Do(opts ...googleapi.CallOption) (*RevisionList, err // "location": "path", // "required": true, // "type": "string" + // }, + // "pageSize": { + // "default": "200", + // "description": "The maximum number of revisions to return per page.", + // "format": "int32", + // "location": "query", + // "maximum": "1000", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", + // "location": "query", + // "type": "string" // } // }, // "path": "files/{fileId}/revisions", @@ -6300,6 +8208,27 @@ func (c *RevisionsListCall) Do(opts ...googleapi.CallOption) (*RevisionList, err } +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RevisionsListCall) Pages(ctx context.Context, f func(*RevisionList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "drive.revisions.update": type RevisionsUpdateCall struct { @@ -6309,6 +8238,7 @@ type RevisionsUpdateCall struct { revision *Revision urlParams_ gensupport.URLParams ctx_ context.Context + header_ http.Header } // Update: Updates a revision with patch semantics. 
@@ -6336,27 +8266,41 @@ func (c *RevisionsUpdateCall) Context(ctx context.Context) *RevisionsUpdateCall return c } +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RevisionsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + func (c *RevisionsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.revision) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "fileId": c.fileId, "revisionId": c.revisionId, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "drive.revisions.update" call. @@ -6391,7 +8335,8 @@ func (c *RevisionsUpdateCall) Do(opts ...googleapi.CallOption) (*Revision, error HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -6432,3 +8377,749 @@ func (c *RevisionsUpdateCall) Do(opts ...googleapi.CallOption) (*Revision, error // } } + +// method id "drive.teamdrives.create": + +type TeamdrivesCreateCall struct { + s *Service + teamdrive *TeamDrive + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new Team Drive. +func (r *TeamdrivesService) Create(requestId string, teamdrive *TeamDrive) *TeamdrivesCreateCall { + c := &TeamdrivesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("requestId", requestId) + c.teamdrive = teamdrive + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TeamdrivesCreateCall) Fields(s ...googleapi.Field) *TeamdrivesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TeamdrivesCreateCall) Context(ctx context.Context) *TeamdrivesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TeamdrivesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TeamdrivesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.teamdrive) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "teamdrives") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "drive.teamdrives.create" call. +// Exactly one of *TeamDrive or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TeamDrive.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TeamdrivesCreateCall) Do(opts ...googleapi.CallOption) (*TeamDrive, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TeamDrive{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new Team Drive.", + // "httpMethod": "POST", + // "id": "drive.teamdrives.create", + // "parameterOrder": [ + // "requestId" + // ], + // "parameters": { + // "requestId": { + // "description": "An ID, such as a random UUID, which uniquely identifies this user's request for idempotent creation of a Team Drive. A repeated request by the same user and with the same request ID will avoid creating duplicates by attempting to create the same Team Drive. If the Team Drive already exists a 409 error will be returned.", + // "location": "query", + // "required": true, + // "type": "string" + // } + // }, + // "path": "teamdrives", + // "request": { + // "$ref": "TeamDrive" + // }, + // "response": { + // "$ref": "TeamDrive" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive" + // ] + // } + +} + +// method id "drive.teamdrives.delete": + +type TeamdrivesDeleteCall struct { + s *Service + teamDriveId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes a Team Drive for which the user is an +// organizer. The Team Drive cannot contain any untrashed items. +func (r *TeamdrivesService) Delete(teamDriveId string) *TeamdrivesDeleteCall { + c := &TeamdrivesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.teamDriveId = teamDriveId + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TeamdrivesDeleteCall) Fields(s ...googleapi.Field) *TeamdrivesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TeamdrivesDeleteCall) Context(ctx context.Context) *TeamdrivesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TeamdrivesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TeamdrivesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "teamdrives/{teamDriveId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "teamDriveId": c.teamDriveId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "drive.teamdrives.delete" call. +func (c *TeamdrivesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes a Team Drive for which the user is an organizer. The Team Drive cannot contain any untrashed items.", + // "httpMethod": "DELETE", + // "id": "drive.teamdrives.delete", + // "parameterOrder": [ + // "teamDriveId" + // ], + // "parameters": { + // "teamDriveId": { + // "description": "The ID of the Team Drive", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "teamdrives/{teamDriveId}", + // "scopes": [ + // "https://www.googleapis.com/auth/drive" + // ] + // } + +} + +// method id "drive.teamdrives.get": + +type TeamdrivesGetCall struct { + s *Service + teamDriveId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a Team Drive's metadata by ID. +func (r *TeamdrivesService) Get(teamDriveId string) *TeamdrivesGetCall { + c := &TeamdrivesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.teamDriveId = teamDriveId + return c +} + +// UseDomainAdminAccess sets the optional parameter +// "useDomainAdminAccess": Issue the request as a domain administrator; +// if set to true, then the requester will be granted access if they are +// an administrator of the domain to which the Team Drive belongs. +func (c *TeamdrivesGetCall) UseDomainAdminAccess(useDomainAdminAccess bool) *TeamdrivesGetCall { + c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *TeamdrivesGetCall) Fields(s ...googleapi.Field) *TeamdrivesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TeamdrivesGetCall) IfNoneMatch(entityTag string) *TeamdrivesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TeamdrivesGetCall) Context(ctx context.Context) *TeamdrivesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TeamdrivesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TeamdrivesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "teamdrives/{teamDriveId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "teamDriveId": c.teamDriveId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "drive.teamdrives.get" call. +// Exactly one of *TeamDrive or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TeamDrive.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TeamdrivesGetCall) Do(opts ...googleapi.CallOption) (*TeamDrive, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TeamDrive{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a Team Drive's metadata by ID.", + // "httpMethod": "GET", + // "id": "drive.teamdrives.get", + // "parameterOrder": [ + // "teamDriveId" + // ], + // "parameters": { + // "teamDriveId": { + // "description": "The ID of the Team Drive", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "useDomainAdminAccess": { + // "default": "false", + // "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the Team Drive belongs.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "teamdrives/{teamDriveId}", + // "response": { + // "$ref": "TeamDrive" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.teamdrives.list": + +type TeamdrivesListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the user's Team Drives. +func (r *TeamdrivesService) List() *TeamdrivesListCall { + c := &TeamdrivesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// Team Drives to return. +func (c *TeamdrivesListCall) PageSize(pageSize int64) *TeamdrivesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Page token for +// Team Drives. +func (c *TeamdrivesListCall) PageToken(pageToken string) *TeamdrivesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Q sets the optional parameter "q": Query string for searching Team +// Drives. +func (c *TeamdrivesListCall) Q(q string) *TeamdrivesListCall { + c.urlParams_.Set("q", q) + return c +} + +// UseDomainAdminAccess sets the optional parameter +// "useDomainAdminAccess": Issue the request as a domain administrator; +// if set to true, then all Team Drives of the domain in which the +// requester is an administrator are returned. +func (c *TeamdrivesListCall) UseDomainAdminAccess(useDomainAdminAccess bool) *TeamdrivesListCall { + c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TeamdrivesListCall) Fields(s ...googleapi.Field) *TeamdrivesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TeamdrivesListCall) IfNoneMatch(entityTag string) *TeamdrivesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TeamdrivesListCall) Context(ctx context.Context) *TeamdrivesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TeamdrivesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TeamdrivesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "teamdrives") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "drive.teamdrives.list" call. +// Exactly one of *TeamDriveList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TeamDriveList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TeamdrivesListCall) Do(opts ...googleapi.CallOption) (*TeamDriveList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TeamDriveList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the user's Team Drives.", + // "httpMethod": "GET", + // "id": "drive.teamdrives.list", + // "parameters": { + // "pageSize": { + // "default": "10", + // "description": "Maximum number of Team Drives to return.", + // "format": "int32", + // "location": "query", + // "maximum": "100", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Page token for Team Drives.", + // "location": "query", + // "type": "string" + // }, + // "q": { + // "description": "Query string for searching Team Drives.", + // "location": "query", + // "type": "string" + // }, + // "useDomainAdminAccess": { + // "default": "false", + // "description": "Issue the request as a domain administrator; if set to true, then all Team Drives of the domain in which the requester is an administrator are returned.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "teamdrives", + // "response": { + // "$ref": "TeamDriveList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TeamdrivesListCall) Pages(ctx context.Context, f func(*TeamDriveList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "drive.teamdrives.update": + +type TeamdrivesUpdateCall struct { + s *Service + teamDriveId string + teamdrive *TeamDrive + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a Team Drive's metadata +func (r *TeamdrivesService) Update(teamDriveId string, teamdrive *TeamDrive) *TeamdrivesUpdateCall { + c := &TeamdrivesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.teamDriveId = teamDriveId + c.teamdrive = teamdrive + return c +} + +// UseDomainAdminAccess sets the optional parameter +// "useDomainAdminAccess": Issue the request as a domain administrator; +// if set to true, then the requester will be granted access if they are +// an administrator of the domain to which the Team Drive belongs. +func (c *TeamdrivesUpdateCall) UseDomainAdminAccess(useDomainAdminAccess bool) *TeamdrivesUpdateCall { + c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *TeamdrivesUpdateCall) Fields(s ...googleapi.Field) *TeamdrivesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TeamdrivesUpdateCall) Context(ctx context.Context) *TeamdrivesUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TeamdrivesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TeamdrivesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.teamdrive) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "teamdrives/{teamDriveId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "teamDriveId": c.teamDriveId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "drive.teamdrives.update" call. +// Exactly one of *TeamDrive or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TeamDrive.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TeamdrivesUpdateCall) Do(opts ...googleapi.CallOption) (*TeamDrive, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TeamDrive{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a Team Drive's metadata", + // "httpMethod": "PATCH", + // "id": "drive.teamdrives.update", + // "parameterOrder": [ + // "teamDriveId" + // ], + // "parameters": { + // "teamDriveId": { + // "description": "The ID of the Team Drive", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "useDomainAdminAccess": { + // "default": "false", + // "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the Team Drive belongs.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "teamdrives/{teamDriveId}", + // "request": { + // "$ref": "TeamDrive" + // }, + // "response": { + // "$ref": "TeamDrive" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/google.golang.org/api/gensupport/backoff.go index 13561404..94b7789e 100644 --- a/vendor/google.golang.org/api/gensupport/backoff.go +++ b/vendor/google.golang.org/api/gensupport/backoff.go @@ -9,6 +9,8 @@ import ( "time" ) +// BackoffStrategy defines the set of functions that a backoff-er must +// implement. type BackoffStrategy interface { // Pause returns the duration of the next pause and true if the operation should be // retried, or false if no further retries should be attempted. @@ -28,6 +30,7 @@ type ExponentialBackoff struct { n uint } +// Pause returns the amount of time the caller should wait. func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { if eb.total > eb.Max { return 0, false @@ -40,6 +43,8 @@ func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { return d, true } +// Reset resets the backoff strategy such that the next Pause call will begin +// counting from the start. It is not safe to call concurrently with Pause. func (eb *ExponentialBackoff) Reset() { eb.n = 0 eb.total = 0 diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go index 4b8ec142..3d0817ed 100644 --- a/vendor/google.golang.org/api/gensupport/buffer.go +++ b/vendor/google.golang.org/api/gensupport/buffer.go @@ -11,8 +11,9 @@ import ( "google.golang.org/api/googleapi" ) -// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks. -type ResumableBuffer struct { +// MediaBuffer buffers data from an io.Reader to support uploading media in +// retryable chunks. It should be created with NewMediaBuffer. +type MediaBuffer struct { media io.Reader chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. 
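A minimal sketch (illustrative only, not part of the vendored code) of how a MediaBuffer might be drained chunk by chunk, assuming the Chunk/Next semantics shown in the surrounding hunks; the drain and uploadChunk names are hypothetical:

package main

import (
	"io"
	"strings"

	"google.golang.org/api/gensupport"
)

// drain consumes a MediaBuffer one chunk at a time. uploadChunk is a
// hypothetical stand-in for the real resumable-upload request.
func drain(mb *gensupport.MediaBuffer, uploadChunk func(r io.Reader, off int64, size int) error) error {
	for {
		chunk, off, size, err := mb.Chunk()
		if err != nil && err != io.EOF {
			return err // read error from the underlying media
		}
		if size > 0 {
			if uerr := uploadChunk(chunk, off, size); uerr != nil {
				return uerr
			}
		}
		if err == io.EOF {
			return nil // the final (possibly partial) chunk has been handled
		}
		mb.Next() // advance to the next chunk
	}
}

func main() {
	mb := gensupport.NewMediaBuffer(strings.NewReader("example media"), 4)
	_ = drain(mb, func(r io.Reader, off int64, size int) error { return nil })
}

The resumable-upload machinery later in this patch (PrepareUpload, MediaInfo, ResumableUpload) wraps essentially this pattern for the generated upload calls.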
@@ -22,42 +23,43 @@ type ResumableBuffer struct { off int64 } -func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer { - return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)} +// NewMediaBuffer initializes a MediaBuffer. +func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer { + return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)} } // Chunk returns the current buffered chunk, the offset in the underlying media // from which the chunk is drawn, and the size of the chunk. // Successive calls to Chunk return the same chunk between calls to Next. -func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { +func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { // There may already be data in chunk if Next has not been called since the previous call to Chunk. - if rb.err == nil && len(rb.chunk) == 0 { - rb.err = rb.loadChunk() + if mb.err == nil && len(mb.chunk) == 0 { + mb.err = mb.loadChunk() } - return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err + return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err } // loadChunk will read from media into chunk, up to the capacity of chunk. -func (rb *ResumableBuffer) loadChunk() error { - bufSize := cap(rb.chunk) - rb.chunk = rb.chunk[:bufSize] +func (mb *MediaBuffer) loadChunk() error { + bufSize := cap(mb.chunk) + mb.chunk = mb.chunk[:bufSize] read := 0 var err error for err == nil && read < bufSize { var n int - n, err = rb.media.Read(rb.chunk[read:]) + n, err = mb.media.Read(mb.chunk[read:]) read += n } - rb.chunk = rb.chunk[:read] + mb.chunk = mb.chunk[:read] return err } // Next advances to the next chunk, which will be returned by the next call to Chunk. // Calls to Next without a corresponding prior call to Chunk will have no effect. -func (rb *ResumableBuffer) Next() { - rb.off += int64(len(rb.chunk)) - rb.chunk = rb.chunk[0:0] +func (mb *MediaBuffer) Next() { + mb.off += int64(len(mb.chunk)) + mb.chunk = mb.chunk[0:0] } type readerTyper struct { diff --git a/vendor/google.golang.org/api/gensupport/header.go b/vendor/google.golang.org/api/gensupport/header.go new file mode 100644 index 00000000..cb5e67c7 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/header.go @@ -0,0 +1,22 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "runtime" + "strings" +) + +// GoogleClientHeader returns the value to use for the x-goog-api-client +// header, which is used internally by Google. +func GoogleClientHeader(generatorVersion, clientElement string) string { + elts := []string{"gl-go/" + strings.Replace(runtime.Version(), " ", "_", -1)} + if clientElement != "" { + elts = append(elts, clientElement) + } + elts = append(elts, fmt.Sprintf("gdcl/%s", generatorVersion)) + return strings.Join(elts, " ") +} diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/gensupport/json.go index dd7bcd2e..c01e3218 100644 --- a/vendor/google.golang.org/api/gensupport/json.go +++ b/vendor/google.golang.org/api/gensupport/json.go @@ -12,29 +12,43 @@ import ( ) // MarshalJSON returns a JSON encoding of schema containing only selected fields. -// A field is selected if: -// * it has a non-empty value, or -// * its field name is present in forceSendFields, and -// * it is not a nil pointer or nil interface. 
+// A field is selected if any of the following is true: +// * it has a non-empty value +// * its field name is present in forceSendFields and it is not a nil pointer or nil interface +// * its field name is present in nullFields. // The JSON key for each selected field is taken from the field's json: struct tag. -func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { - if len(forceSendFields) == 0 { +func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { + if len(forceSendFields) == 0 && len(nullFields) == 0 { return json.Marshal(schema) } - mustInclude := make(map[string]struct{}) + mustInclude := make(map[string]bool) for _, f := range forceSendFields { - mustInclude[f] = struct{}{} + mustInclude[f] = true + } + useNull := make(map[string]bool) + useNullMaps := make(map[string]map[string]bool) + for _, nf := range nullFields { + parts := strings.SplitN(nf, ".", 2) + field := parts[0] + if len(parts) == 1 { + useNull[field] = true + } else { + if useNullMaps[field] == nil { + useNullMaps[field] = map[string]bool{} + } + useNullMaps[field][parts[1]] = true + } } - dataMap, err := schemaToMap(schema, mustInclude) + dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps) if err != nil { return nil, err } return json.Marshal(dataMap) } -func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { +func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { m := make(map[string]interface{}) s := reflect.ValueOf(schema) st := s.Type() @@ -54,10 +68,36 @@ func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[strin v := s.Field(i) f := st.Field(i) + + if useNull[f.Name] { + if !isEmptyValue(v) { + return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name) + } + m[tag.apiName] = nil + continue + } + if !includeField(v, f, mustInclude) { continue } + // If map fields are explicitly set to null, use a map[string]interface{}. + if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { + ms, ok := v.Interface().(map[string]string) + if !ok { + return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name) + } + mi := map[string]interface{}{} + for k, v := range ms { + mi[k] = v + } + for k := range useNullMaps[f.Name] { + mi[k] = nil + } + m[tag.apiName] = mi + continue + } + // nil maps are treated as empty maps. if f.Type.Kind() == reflect.Map && v.IsNil() { m[tag.apiName] = map[string]string{} @@ -127,7 +167,7 @@ func parseJSONTag(val string) (jsonTag, error) { } // Reports whether the struct field "f" with value "v" should be included in JSON output. -func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { +func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool { // The regular JSON encoding of a nil pointer is "null", which means "delete this field". // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. // However, many fields are not pointers, so there would be no way to delete these fields. 
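A minimal sketch (illustrative only, not part of the vendored code) of the new three-argument MarshalJSON; the Item type is hypothetical, written in the style of the generated schema structs:

package main

import (
	"fmt"

	"google.golang.org/api/gensupport"
)

// Item is a hypothetical schema struct in the style of the generated types.
type Item struct {
	Name        string `json:"name,omitempty"`
	Description string `json:"description,omitempty"`
	Count       int64  `json:"count,omitempty"`
}

func main() {
	it := Item{Name: "example"}
	// Force the zero-valued "count" to be sent, and emit "description" as JSON null.
	b, err := gensupport.MarshalJSON(it, []string{"Count"}, []string{"Description"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // prints something like {"count":0,"description":null,"name":"example"}
}

In the generated code this path is typically reached via a struct's ForceSendFields and NullFields slices, which are passed straight through to MarshalJSON.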
@@ -144,8 +184,7 @@ func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string return false } - _, ok := mustInclude[f.Name] - return ok || !isEmptyValue(v) + return mustInclude[f.Name] || !isEmptyValue(v) } // isEmptyValue reports whether v is the empty value for its type. This diff --git a/vendor/google.golang.org/api/gensupport/jsonfloat.go b/vendor/google.golang.org/api/gensupport/jsonfloat.go new file mode 100644 index 00000000..83778508 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/jsonfloat.go @@ -0,0 +1,57 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gensupport + +import ( + "encoding/json" + "errors" + "fmt" + "math" +) + +// JSONFloat64 is a float64 that supports proper unmarshaling of special float +// values in JSON, according to +// https://developers.google.com/protocol-buffers/docs/proto3#json. Although +// that is a proto-to-JSON spec, it applies to all Google APIs. +// +// The jsonpb package +// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has +// similar functionality, but only for direct translation from proto messages +// to JSON. +type JSONFloat64 float64 + +func (f *JSONFloat64) UnmarshalJSON(data []byte) error { + var ff float64 + if err := json.Unmarshal(data, &ff); err == nil { + *f = JSONFloat64(ff) + return nil + } + var s string + if err := json.Unmarshal(data, &s); err == nil { + switch s { + case "NaN": + ff = math.NaN() + case "Infinity": + ff = math.Inf(1) + case "-Infinity": + ff = math.Inf(-1) + default: + return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s) + } + *f = JSONFloat64(ff) + return nil + } + return errors.New("google.golang.org/api/internal: data not float or string") +} diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go index 817f46f5..4cef4adb 100644 --- a/vendor/google.golang.org/api/gensupport/media.go +++ b/vendor/google.golang.org/api/gensupport/media.go @@ -5,12 +5,15 @@ package gensupport import ( + "bytes" "fmt" "io" "io/ioutil" "mime/multipart" "net/http" "net/textproto" + "strings" + "sync" "google.golang.org/api/googleapi" ) @@ -103,12 +106,13 @@ type typeReader struct { typ string } -// multipartReader combines the contents of multiple readers to creat a multipart/related HTTP body. +// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body. // Close must be called if reads from the multipartReader are abandoned before reaching EOF. 
type multipartReader struct { pr *io.PipeReader - pipeOpen bool ctype string + mu sync.Mutex + pipeOpen bool } func newMultipartReader(parts []typeReader) *multipartReader { @@ -144,10 +148,13 @@ func (mp *multipartReader) Read(data []byte) (n int, err error) { } func (mp *multipartReader) Close() error { + mp.mu.Lock() if !mp.pipeOpen { + mp.mu.Unlock() return nil } mp.pipeOpen = false + mp.mu.Unlock() return mp.pr.Close() } @@ -174,27 +181,162 @@ func typeHeader(contentType string) textproto.MIMEHeader { // PrepareUpload determines whether the data in the supplied reader should be // uploaded in a single request, or in sequential chunks. // chunkSize is the size of the chunk that media should be split into. -// If chunkSize is non-zero and the contents of media do not fit in a single -// chunk (or there is an error reading media), then media will be returned as a -// ResumableBuffer. Otherwise, media will be returned as a Reader. +// +// If chunkSize is zero, media is returned as the first value, and the other +// two return values are nil, true. +// +// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the +// contents of media fit in a single chunk. // // After PrepareUpload has been called, media should no longer be used: the // media content should be accessed via one of the return values. -func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, - *ResumableBuffer) { +func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) { if chunkSize == 0 { // do not chunk - return media, nil + return media, nil, true + } + mb = NewMediaBuffer(media, chunkSize) + _, _, _, err := mb.Chunk() + // If err is io.EOF, we can upload this in a single request. Otherwise, err is + // either nil or a non-EOF error. If it is the latter, then the next call to + // mb.Chunk will return the same error. Returning a MediaBuffer ensures that this + // error will be handled at some point. + return nil, mb, err == io.EOF +} + +// MediaInfo holds information for media uploads. It is intended for use by generated +// code only. +type MediaInfo struct { + // At most one of Media and MediaBuffer will be set. + media io.Reader + buffer *MediaBuffer + singleChunk bool + mType string + size int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater googleapi.ProgressUpdater +} + +// NewInfoFromMedia should be invoked from the Media method of a call. It returns a +// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer +// if needed. +func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { + mi := &MediaInfo{} + opts := googleapi.ProcessMediaOptions(options) + if !opts.ForceEmptyContentType { + r, mi.mType = DetermineContentType(r, opts.ContentType) + } + mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) + return mi +} + +// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a +// call. It returns a MediaInfo using the given reader, size and media type. +func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { + rdr := ReaderAtToReader(r, size) + rdr, mType := DetermineContentType(rdr, mediaType) + return &MediaInfo{ + size: size, + mType: mType, + buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize), + media: nil, + singleChunk: false, + } +} + +// SetProgressUpdater sets the progress updater for the media info. 
+func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) { + if mi != nil { + mi.progressUpdater = pu + } +} + +// UploadType determines the type of upload: a single request, or a resumable +// series of requests. +func (mi *MediaInfo) UploadType() string { + if mi.singleChunk { + return "multipart" } + return "resumable" +} - rb := NewResumableBuffer(media, chunkSize) - rdr, _, _, err := rb.Chunk() +// UploadRequest sets up an HTTP request for media upload. It adds headers +// as necessary, and returns a replacement for the body and a function for http.Request.GetBody. +func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) { + cleanup = func() {} + if mi == nil { + return body, nil, cleanup + } + var media io.Reader + if mi.media != nil { + // This only happens when the caller has turned off chunking. In that + // case, we write all of media in a single non-retryable request. + media = mi.media + } else if mi.singleChunk { + // The data fits in a single chunk, which has now been read into the MediaBuffer. + // We obtain that chunk so we can write it in a single request. The request can + // be retried because the data is stored in the MediaBuffer. + media, _, _, _ = mi.buffer.Chunk() + } + if media != nil { + fb := readerFunc(body) + fm := readerFunc(media) + combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) + if fb != nil && fm != nil { + getBody = func() (io.ReadCloser, error) { + rb := ioutil.NopCloser(fb()) + rm := ioutil.NopCloser(fm()) + r, _ := CombineBodyMedia(rb, "application/json", rm, mi.mType) + return r, nil + } + } + cleanup = func() { combined.Close() } + reqHeaders.Set("Content-Type", ctype) + body = combined + } + if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + reqHeaders.Set("X-Upload-Content-Type", mi.mType) + } + return body, getBody, cleanup +} - if err == io.EOF { // we can upload this in a single request - return rdr, nil +// readerFunc returns a function that always returns an io.Reader that has the same +// contents as r, provided that can be done without consuming r. Otherwise, it +// returns nil. +// See http.NewRequest (in net/http/request.go). +func readerFunc(r io.Reader) func() io.Reader { + switch r := r.(type) { + case *bytes.Buffer: + buf := r.Bytes() + return func() io.Reader { return bytes.NewReader(buf) } + case *bytes.Reader: + snapshot := *r + return func() io.Reader { r := snapshot; return &r } + case *strings.Reader: + snapshot := *r + return func() io.Reader { r := snapshot; return &r } + default: + return nil + } +} + +// ResumableUpload returns an appropriately configured ResumableUpload value if the +// upload is resumable, or nil otherwise. +func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { + if mi == nil || mi.singleChunk { + return nil + } + return &ResumableUpload{ + URI: locURI, + Media: mi.buffer, + MediaType: mi.mType, + Callback: func(curr int64) { + if mi.progressUpdater != nil { + mi.progressUpdater(curr, mi.size) + } + }, } - // err might be a non-EOF error. If it is, the next call to rb.Chunk will - // return the same error. Returning a ResumableBuffer ensures that this error - // will be handled at some point. +} - return nil, rb +// SetGetBody sets the GetBody field of req to f. 
+func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) { + req.GetBody = f } diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/gensupport/params.go index 3b3c7439..0e878a42 100644 --- a/vendor/google.golang.org/api/gensupport/params.go +++ b/vendor/google.golang.org/api/gensupport/params.go @@ -43,6 +43,7 @@ func (u URLParams) Encode() string { return url.Values(u).Encode() } +// SetOptions sets the URL params and any additional call options. func SetOptions(u URLParams, opts ...googleapi.CallOption) { for _, o := range opts { u.Set(o.Get()) diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go index b3e774aa..2552a6ac 100644 --- a/vendor/google.golang.org/api/gensupport/resumable.go +++ b/vendor/google.golang.org/api/gensupport/resumable.go @@ -5,21 +5,16 @@ package gensupport import ( + "context" + "errors" "fmt" "io" "net/http" "sync" "time" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" ) const ( - // statusResumeIncomplete is the code returned by the Google uploader - // when the transfer is not yet complete. - statusResumeIncomplete = 308 - // statusTooManyRequests is returned by the storage API if the // per-project limits have been temporarily exceeded. The request // should be retried. @@ -35,7 +30,7 @@ type ResumableUpload struct { URI string UserAgent string // User-Agent for header of the request // Media is the object being uploaded. - Media *ResumableBuffer + Media *MediaBuffer // MediaType defines the media type, e.g. "image/jpeg". MediaType string @@ -80,8 +75,23 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Range", contentRange) req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) - return ctxhttp.Do(ctx, rx.Client, req) + // Google's upload endpoint uses status code 308 for a + // different purpose than the "308 Permanent Redirect" + // since-standardized in RFC 7238. Because of the conflict in + // semantics, Google added this new request header which + // causes it to not use "308" and instead reply with 200 OK + // and sets the upload-specific "X-HTTP-Status-Code-Override: + // 308" response header. + req.Header.Set("X-GUploader-No-308", "yes") + + return SendRequest(ctx, rx.Client, req) +} + +func statusResumeIncomplete(resp *http.Response) bool { + // This is how the server signals "status resume incomplete" + // when X-GUploader-No-308 is set to "yes": + return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308" } // reportProgress calls a user-supplied callback to report upload progress. @@ -112,11 +122,17 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e return res, err } - if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK { + // We sent "X-GUploader-No-308: yes" (see comment elsewhere in + // this file), so we don't expect to get a 308. + if res.StatusCode == 308 { + return nil, errors.New("unexpected 308 response status code") + } + + if res.StatusCode == http.StatusOK { rx.reportProgress(off, off+int64(size)) } - if res.StatusCode == statusResumeIncomplete { + if statusResumeIncomplete(res) { rx.Media.Next() } return res, nil @@ -135,6 +151,8 @@ func contextDone(ctx context.Context) bool { // It retries using the provided back off strategy until cancelled or the // strategy indicates to stop retrying. 
// It is called from the auto-generated API code and is not visible to the user. +// Before sending an HTTP request, Upload calls any registered hook functions, +// and calls the returned functions after the request returns (see send.go). // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { @@ -176,7 +194,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // If the chunk was uploaded successfully, but there's still // more to go, upload the next chunk without any delay. - if status == statusResumeIncomplete { + if statusResumeIncomplete(resp) { pause = 0 backoff.Reset() resp.Body.Close() diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go index 7f83d1da..fdde3f42 100644 --- a/vendor/google.golang.org/api/gensupport/retry.go +++ b/vendor/google.golang.org/api/gensupport/retry.go @@ -1,12 +1,25 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package gensupport import ( + "context" "io" "net" "net/http" "time" - - "golang.org/x/net/context" ) // Retry invokes the given function, retrying it multiple times if the connection failed or @@ -55,23 +68,17 @@ func DefaultBackoffStrategy() BackoffStrategy { // shouldRetry returns true if the HTTP response / error indicates that the // request should be attempted again. func shouldRetry(status int, err error) bool { - // Retry for 5xx response codes. - if 500 <= status && status < 600 { + if 500 <= status && status <= 599 { return true } - - // Retry on statusTooManyRequests{ if status == statusTooManyRequests { return true } - - // Retry on unexpected EOFs and temporary network errors. if err == io.ErrUnexpectedEOF { return true } if err, ok := err.(net.Error); ok { return err.Temporary() } - return false } diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go new file mode 100644 index 00000000..57993930 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/send.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "context" + "encoding/json" + "errors" + "net/http" +) + +// Hook is the type of a function that is called once before each HTTP request +// that is sent by a generated API. It returns a function that is called after +// the request returns. +// Hooks are not called if the context is nil. +type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response) + +var hooks []Hook + +// RegisterHook registers a Hook to be called before each HTTP request by a +// generated API. Hooks are called in the order they are registered. 
Each +// hook can return a function; if it is non-nil, it is called after the HTTP +// request returns. These functions are called in the reverse order. +// RegisterHook should not be called concurrently with itself or SendRequest. +func RegisterHook(h Hook) { + hooks = append(hooks, h) +} + +// SendRequest sends a single HTTP request using the given client. +// If ctx is non-nil, it calls all hooks, then sends the request with +// req.WithContext, then calls any functions returned by the hooks in +// reverse order. +func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. + if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } + if ctx == nil { + return client.Do(req) + } + // Call hooks in order of registration, store returned funcs. + post := make([]func(resp *http.Response), len(hooks)) + for i, h := range hooks { + fn := h(ctx, req) + post[i] = fn + } + + // Send request. + resp, err := send(ctx, client, req) + + // Call returned funcs in reverse order. + for i := len(post) - 1; i >= 0; i-- { + if fn := post[i]; fn != nil { + fn(resp) + } + } + return resp, err +} + +func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// DecodeResponse decodes the body of res into target. If there is no body, +// target is unchanged. +func DecodeResponse(target interface{}, res *http.Response) error { + if res.StatusCode == http.StatusNoContent { + return nil + } + return json.NewDecoder(res.Body).Decode(target) +} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 858537e0..162d63a6 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -37,24 +37,28 @@ type SizeReaderAt interface { // ServerResponse is embedded in each Do response and // provides the HTTP status code and header sent by the server. type ServerResponse struct { - // HTTPStatusCode is the server's response status code. - // When using a resource method's Do call, this will always be in the 2xx range. + // HTTPStatusCode is the server's response status code. When using a + // resource method's Do call, this will always be in the 2xx range. HTTPStatusCode int // Header contains the response header fields from the server. Header http.Header } const ( + // Version defines the gax version being used. This is typically sent + // in an HTTP header to services. Version = "0.5" // UserAgent is the header string used to identify this package. UserAgent = "google-api-go-client/" + Version - // The default chunk size to use for resumable uplods if not specified by the user. + // DefaultUploadChunkSize is the default chunk size to use for resumable + // uploads if not specified by the user. DefaultUploadChunkSize = 8 * 1024 * 1024 - // The minimum chunk size that can be used for resumable uploads. 
All - // user-specified chunk sizes must be multiple of this value. + // MinUploadChunkSize is the minimum chunk size that can be used for + // resumable uploads. All user-specified chunk sizes must be multiple of + // this value. MinUploadChunkSize = 256 * 1024 ) @@ -149,21 +153,25 @@ func IsNotModified(err error) bool { // CheckMediaResponse returns an error (of type *Error) if the response // status code is not 2xx. Unlike CheckResponse it does not assume the // body is a JSON error document. +// It is the caller's responsibility to close res.Body. func CheckMediaResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) - res.Body.Close() return &Error{ Code: res.StatusCode, Body: string(slurp), } } +// MarshalStyle defines whether to marshal JSON with a {"data": ...} wrapper. type MarshalStyle bool +// WithDataWrapper marshals JSON with a {"data": ...} wrapper. var WithDataWrapper = MarshalStyle(true) + +// WithoutDataWrapper marshals JSON without a {"data": ...} wrapper. var WithoutDataWrapper = MarshalStyle(false) func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { @@ -181,37 +189,12 @@ func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { return buf, nil } -// endingWithErrorReader from r until it returns an error. If the -// final error from r is io.EOF and e is non-nil, e is used instead. -type endingWithErrorReader struct { - r io.Reader - e error -} - -func (er endingWithErrorReader) Read(p []byte) (n int, err error) { - n, err = er.r.Read(p) - if err == io.EOF && er.e != nil { - err = er.e - } - return -} - -// countingWriter counts the number of bytes it receives to write, but -// discards them. -type countingWriter struct { - n *int64 -} - -func (w countingWriter) Write(p []byte) (int, error) { - *w.n += int64(len(p)) - return len(p), nil -} - // ProgressUpdater is a function that is called upon every progress update of a resumable upload. // This is the only part of a resumable upload (from googleapi) that is usable by the developer. // The remaining usable pieces of resumable uploads is exposed in each auto-generated API. type ProgressUpdater func(current, total int64) +// MediaOption defines the interface for setting media options. type MediaOption interface { setOptions(o *MediaOptions) } @@ -268,51 +251,39 @@ func ProcessMediaOptions(opts []MediaOption) *MediaOptions { return mo } +// ResolveRelative resolves relatives such as "http://www.golang.org/" and +// "topics/myproject/mytopic" into a single string, such as +// "http://www.golang.org/topics/myproject/mytopic". It strips all parent +// references (e.g. ../..) as well as anything after the host +// (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz). func ResolveRelative(basestr, relstr string) string { u, _ := url.Parse(basestr) + afterColonPath := "" + if i := strings.IndexRune(relstr, ':'); i > 0 { + afterColonPath = relstr[i+1:] + relstr = relstr[:i] + } rel, _ := url.Parse(relstr) u = u.ResolveReference(rel) us := u.String() + if afterColonPath != "" { + us = fmt.Sprintf("%s:%s", us, afterColonPath) + } us = strings.Replace(us, "%7B", "{", -1) us = strings.Replace(us, "%7D", "}", -1) + us = strings.Replace(us, "%2A", "*", -1) return us } -// has4860Fix is whether this Go environment contains the fix for -// http://golang.org/issue/4860 -var has4860Fix bool - -// init initializes has4860Fix by checking the behavior of the net/http package. 
-func init() { - r := http.Request{ - URL: &url.URL{ - Scheme: "http", - Opaque: "//opaque", - }, - } - b := &bytes.Buffer{} - r.Write(b) - has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http")) -} - -// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it -// don't alter any hex-escaped characters in u.Path. -func SetOpaque(u *url.URL) { - u.Opaque = "//" + u.Host + u.Path - if !has4860Fix { - u.Opaque = u.Scheme + ":" + u.Opaque - } -} - // Expand subsitutes any {encoded} strings in the URL passed in using // the map supplied. // // This calls SetOpaque to avoid encoding of the parameters in the URL path. func Expand(u *url.URL, expansions map[string]string) { - expanded, err := uritemplates.Expand(u.Path, expansions) + escaped, unescaped, err := uritemplates.Expand(u.Path, expansions) if err == nil { - u.Path = expanded - SetOpaque(u) + u.Path = unescaped + u.RawPath = escaped } } diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go index 7c103ba1..63bf0538 100644 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go @@ -34,11 +34,37 @@ func pctEncode(src []byte) []byte { return dst } -func escape(s string, allowReserved bool) string { +// pairWriter is a convenience struct which allows escaped and unescaped +// versions of the template to be written in parallel. +type pairWriter struct { + escaped, unescaped bytes.Buffer +} + +// Write writes the provided string directly without any escaping. +func (w *pairWriter) Write(s string) { + w.escaped.WriteString(s) + w.unescaped.WriteString(s) +} + +// Escape writes the provided string, escaping the string for the +// escaped output. +func (w *pairWriter) Escape(s string, allowReserved bool) { + w.unescaped.WriteString(s) if allowReserved { - return string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) } - return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) +} + +// Escaped returns the escaped string. +func (w *pairWriter) Escaped() string { + return w.escaped.String() +} + +// Unescaped returns the unescaped string. +func (w *pairWriter) Unescaped() string { + return w.unescaped.String() } // A uriTemplate is a parsed representation of a URI template. @@ -170,18 +196,20 @@ func parseTerm(term string) (result templateTerm, err error) { return result, err } -// Expand expands a URI template with a set of values to produce a string. -func (t *uriTemplate) Expand(values map[string]string) string { - var buf bytes.Buffer +// Expand expands a URI template with a set of values to produce the +// resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped. 
+func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) { + var w pairWriter for _, p := range t.parts { - p.expand(&buf, values) + p.expand(&w, values) } - return buf.String() + return w.Escaped(), w.Unescaped() } -func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { +func (tp *templatePart) expand(w *pairWriter, values map[string]string) { if len(tp.raw) > 0 { - buf.WriteString(tp.raw) + w.Write(tp.raw) return } var first = true @@ -191,30 +219,30 @@ func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { continue } if first { - buf.WriteString(tp.first) + w.Write(tp.first) first = false } else { - buf.WriteString(tp.sep) + w.Write(tp.sep) } - tp.expandString(buf, term, value) + tp.expandString(w, term, value) } } -func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { +func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) { if tp.named { - buf.WriteString(name) + w.Write(name) if empty { - buf.WriteString(tp.ifemp) + w.Write(tp.ifemp) } else { - buf.WriteString("=") + w.Write("=") } } } -func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { +func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) { if len(s) > t.truncate && t.truncate > 0 { s = s[:t.truncate] } - tp.expandName(buf, t.name, len(s) == 0) - buf.WriteString(escape(s, tp.allowReserved)) + tp.expandName(w, t.name, len(s) == 0) + w.Escape(s, tp.allowReserved) } diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go index eff260a6..2e70b815 100644 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -4,10 +4,14 @@ package uritemplates -func Expand(path string, values map[string]string) (string, error) { +// Expand parses then expands a URI template with a set of values to produce +// the resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped. 
+func Expand(path string, values map[string]string) (escaped, unescaped string, err error) { template, err := parse(path) if err != nil { - return "", err + return "", "", err } - return template.Expand(values), nil + escaped, unescaped = template.Expand(values) + return escaped, unescaped, nil } diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go index a02b4b07..a280e302 100644 --- a/vendor/google.golang.org/api/googleapi/types.go +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -6,6 +6,7 @@ package googleapi import ( "encoding/json" + "errors" "strconv" ) @@ -119,36 +120,55 @@ func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { return dst, nil } -func (s Int64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, s[i], 10) +func (q Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, q[i], 10) }) } -func (s Int32s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, int64(s[i]), 10) +func (q Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(q[i]), 10) }) } -func (s Uint64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, s[i], 10) +func (q Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, q[i], 10) }) } -func (s Uint32s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, uint64(s[i]), 10) +func (q Uint32s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(q[i]), 10) }) } -func (s Float64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendFloat(dst, s[i], 'g', -1, 64) +func (q Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, q[i], 'g', -1, 64) }) } +// RawMessage is a raw encoded JSON value. +// It is identical to json.RawMessage, except it does not suffer from +// https://golang.org/issue/14493. +type RawMessage []byte + +// MarshalJSON returns m. +func (m RawMessage) MarshalJSON() ([]byte, error) { + return m, nil +} + +// UnmarshalJSON sets *m to a copy of data. +func (m *RawMessage) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer") + } + *m = append((*m)[:0], data...) + return nil +} + /* * Helper routines for simplifying the creation of optional fields of basic type. */
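A minimal sketch (illustrative only, not part of the vendored code) of the new googleapi.RawMessage, which behaves like encoding/json.RawMessage and lets a payload be decoded lazily; the envelope type is hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/googleapi"
)

// envelope is a hypothetical wrapper whose payload is kept as raw JSON.
type envelope struct {
	Kind    string               `json:"kind"`
	Payload googleapi.RawMessage `json:"payload"`
}

func main() {
	data := []byte(`{"kind":"drive#teamDrive","payload":{"name":"Marketing"}}`)
	var e envelope
	if err := json.Unmarshal(data, &e); err != nil {
		panic(err)
	}
	// Payload still holds the raw bytes; decode it only once the kind is known.
	fmt.Println(e.Kind, string(e.Payload)) // drive#teamDrive {"name":"Marketing"}
}

As the type's doc comment notes, it mirrors json.RawMessage while avoiding golang.org/issue/14493.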